Jim Wu 2 years ago
parent
commit
b324425b07
100 changed files with 9479 additions and 0 deletions
  1. 49 0
      .gitignore
  2. 370 0
      build.gradle
  3. BIN
      doc/1-Management/project/CCFrame移动开发架构2023.pptx
  4. BIN
      doc/4-Other/redis-6.0.3-win64/msys-2.0.dll
  5. BIN
      doc/4-Other/redis-6.0.3-win64/redis-check-aof.exe
  6. BIN
      doc/4-Other/redis-6.0.3-win64/redis-check-rdb.exe
  7. BIN
      doc/4-Other/redis-6.0.3-win64/redis-cli.exe
  8. BIN
      doc/4-Other/redis-6.0.3-win64/redis-server.exe
  9. 4 0
      doc/4-Other/redis-6.0.3-win64/redis.bat
  10. 1811 0
      doc/4-Other/redis-6.0.3-win64/redis.conf
  11. 172 0
      gradlew
  12. 84 0
      gradlew.bat
  13. 10 0
      settings.gradle
  14. 148 0
      src/main/java/net/coobird/thumbnailator/filters/Border.java
  15. 280 0
      src/main/java/net/oschina/j2cache/redis/RedisGenericCache.java
  16. 101 0
      src/main/java/org/ccframe/app/App.java
  17. 28 0
      src/main/java/org/ccframe/app/CoreConfig.java
  18. 96 0
      src/main/java/org/ccframe/app/DataSourceConfig.java
  19. 36 0
      src/main/java/org/ccframe/app/ElasticsearchConfig.java
  20. 63 0
      src/main/java/org/ccframe/app/ElasticsearchEmbeddedConfig.java
  21. 20 0
      src/main/java/org/ccframe/app/ErrorPageConfig.java
  22. 96 0
      src/main/java/org/ccframe/app/GlobalExceptionConfig.java
  23. 43 0
      src/main/java/org/ccframe/app/LocaleConfig.java
  24. 18 0
      src/main/java/org/ccframe/app/MethodSecurityConfig.java
  25. 30 0
      src/main/java/org/ccframe/app/MinioConfig.java
  26. 12 0
      src/main/java/org/ccframe/app/ProcessorConfig.java
  27. 27 0
      src/main/java/org/ccframe/app/QuartzConfiguration.java
  28. 73 0
      src/main/java/org/ccframe/app/RedissonConfig.java
  29. 50 0
      src/main/java/org/ccframe/app/SecurityConfigurer.java
  30. 112 0
      src/main/java/org/ccframe/app/WebMvcConfigurer.java
  31. 7 0
      src/main/java/org/ccframe/commons/auth/HasRoleSet.java
  32. 35 0
      src/main/java/org/ccframe/commons/auth/JwtAuthenticationToken.java
  33. 79 0
      src/main/java/org/ccframe/commons/auth/JwtHeadFilter.java
  34. 79 0
      src/main/java/org/ccframe/commons/auth/JwtUser.java
  35. 18 0
      src/main/java/org/ccframe/commons/auth/RoleAuth.java
  36. 89 0
      src/main/java/org/ccframe/commons/auth/RoleAuthAnnotationSecurityMetadataSource.java
  37. 83 0
      src/main/java/org/ccframe/commons/auth/TokenUser.java
  38. 50 0
      src/main/java/org/ccframe/commons/base/AggregationField.java
  39. 96 0
      src/main/java/org/ccframe/commons/base/BaseEntity.java
  40. 13 0
      src/main/java/org/ccframe/commons/base/BaseRepository.java
  41. 10 0
      src/main/java/org/ccframe/commons/base/BaseSearchRepository.java
  42. 569 0
      src/main/java/org/ccframe/commons/base/BaseSearchService.java
  43. 378 0
      src/main/java/org/ccframe/commons/base/BaseService.java
  44. 12 0
      src/main/java/org/ccframe/commons/base/DisableCache.java
  45. 8 0
      src/main/java/org/ccframe/commons/base/ICodeEnum.java
  46. 11 0
      src/main/java/org/ccframe/commons/base/IHasSearchBuilder.java
  47. 9 0
      src/main/java/org/ccframe/commons/base/IProcessor.java
  48. 116 0
      src/main/java/org/ccframe/commons/base/OffsetBasedPageRequest.java
  49. 22 0
      src/main/java/org/ccframe/commons/base/OperationLogMapper.java
  50. 48 0
      src/main/java/org/ccframe/commons/base/RedisIDGenerator.java
  51. 49 0
      src/main/java/org/ccframe/commons/base/SearchRange.java
  52. 20 0
      src/main/java/org/ccframe/commons/base/TreeNodeTree.java
  53. 232 0
      src/main/java/org/ccframe/commons/data/JExcelWriter.java
  54. 141 0
      src/main/java/org/ccframe/commons/dbunit/DBUnitExport.java
  55. 8 0
      src/main/java/org/ccframe/commons/dbunit/DatabaseOperationEx.java
  56. 123 0
      src/main/java/org/ccframe/commons/dbunit/InitFileReplacementProcessor.java
  57. 48 0
      src/main/java/org/ccframe/commons/dbunit/InitImageSaveRow.java
  58. 158 0
      src/main/java/org/ccframe/commons/dbunit/RandomReplacementProcessor.java
  59. 105 0
      src/main/java/org/ccframe/commons/dbunit/ReplacementDataSet.java
  60. 14 0
      src/main/java/org/ccframe/commons/dbunit/ReplacementProcessor.java
  61. 141 0
      src/main/java/org/ccframe/commons/dbunit/ReplacementTable.java
  62. 67 0
      src/main/java/org/ccframe/commons/filter/CcRequestLoggingFilter.java
  63. 47 0
      src/main/java/org/ccframe/commons/helper/EntityOperationListener.java
  64. 25 0
      src/main/java/org/ccframe/commons/helper/SpringContextHelper.java
  65. 335 0
      src/main/java/org/ccframe/commons/helper/SysInitBeanHelper.java
  66. 47 0
      src/main/java/org/ccframe/commons/helper/ValidateCodeHelper.java
  67. 54 0
      src/main/java/org/ccframe/commons/helper/ValueLockHelper.java
  68. 20 0
      src/main/java/org/ccframe/commons/helper/ValueWrapper.java
  69. 50 0
      src/main/java/org/ccframe/commons/jpaquery/Criteria.java
  70. 17 0
      src/main/java/org/ccframe/commons/jpaquery/Criterion.java
  71. 39 0
      src/main/java/org/ccframe/commons/jpaquery/LogicalExpression.java
  72. 182 0
      src/main/java/org/ccframe/commons/jpaquery/Restrictions.java
  73. 75 0
      src/main/java/org/ccframe/commons/jpaquery/SimpleExpression.java
  74. 28 0
      src/main/java/org/ccframe/commons/mvc/CcUserArgumentResolver.java
  75. 45 0
      src/main/java/org/ccframe/commons/mvc/CcframeTransactionManager.java
  76. 97 0
      src/main/java/org/ccframe/commons/mvc/ClientPage.java
  77. 17 0
      src/main/java/org/ccframe/commons/mvc/QueryObject.java
  78. 24 0
      src/main/java/org/ccframe/commons/mvc/StringToDateConverter.java
  79. 57 0
      src/main/java/org/ccframe/commons/quartz/BaseQuartzJob.java
  80. 72 0
      src/main/java/org/ccframe/commons/quartz/JobMonitorListener.java
  81. 31 0
      src/main/java/org/ccframe/commons/quartz/QuartzJobFactory.java
  82. 42 0
      src/main/java/org/ccframe/commons/quartz/Scheduled.java
  83. 43 0
      src/main/java/org/ccframe/commons/queue/QueueClient.java
  84. 91 0
      src/main/java/org/ccframe/commons/queue/QueueServer.java
  85. 99 0
      src/main/java/org/ccframe/commons/servlet/CaptchaServlet.java
  86. 98 0
      src/main/java/org/ccframe/commons/servlet/QrcodeServlet.java
  87. 21 0
      src/main/java/org/ccframe/commons/sms/ISmsAdapter.java
  88. 28 0
      src/main/java/org/ccframe/commons/sms/SmsException.java
  89. 81 0
      src/main/java/org/ccframe/commons/util/BigDecimalUtil.java
  90. 61 0
      src/main/java/org/ccframe/commons/util/BrowserTimeCacheUtil.java
  91. 159 0
      src/main/java/org/ccframe/commons/util/BusinessException.java
  92. 162 0
      src/main/java/org/ccframe/commons/util/CcTransactionUtil.java
  93. 107 0
      src/main/java/org/ccframe/commons/util/DESUtil.java
  94. 170 0
      src/main/java/org/ccframe/commons/util/DbUnitUtils.java
  95. 53 0
      src/main/java/org/ccframe/commons/util/ESPathUtil.java
  96. 76 0
      src/main/java/org/ccframe/commons/util/EnumFromCodeUtil.java
  97. 66 0
      src/main/java/org/ccframe/commons/util/FlatXmlWriterEx.java
  98. 108 0
      src/main/java/org/ccframe/commons/util/JsonBinder.java
  99. 132 0
      src/main/java/org/ccframe/commons/util/JsonUtil.java
  100. 49 0
      src/main/java/org/ccframe/commons/util/JwtUtil.java

+ 49 - 0
.gitignore

@@ -0,0 +1,49 @@
+target/
+!.mvn/wrapper/maven-wrapper.jar
+!**/src/main/**/target/
+!**/src/test/**/target/
+
+### IntelliJ IDEA ###
+.idea/modules.xml
+.idea/jarRepositories.xml
+.idea/compiler.xml
+.idea/libraries/
+*.iws
+*.iml
+*.ipr
+
+### Eclipse ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+
+### NetBeans ###
+/nbproject/private/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/
+build/
+!**/src/main/**/build/
+!**/src/test/**/build/
+
+### VS Code ###
+.vscode/
+
+### Mac OS ###
+.DS_Store
+
+/build/
+/bin/
+/target/
+/war/smallpict/20*
+/war/upload/20*
+/war/WEB-INF/classes
+*.pid
+.gradle/
+/logs/
+/war/WEB-INF/eshome/esdata/nodes/

+ 370 - 0
build.gradle

@@ -0,0 +1,370 @@
+//---------------------- Common section ----------------------//
+
+//Build bootstrap: plugins are downloaded automatically
+buildscript {
+    repositories {
+    	maven { url 'https://maven.aliyun.com/nexus/content/groups/public/' } //Aliyun mirror
+    }
+}
+
+//Shared repository configuration
+repositories {
+    maven { url 'https://maven.aliyun.com/nexus/content/groups/public/' } //Aliyun mirror
+}
+
+//---------------------- Java compiler plugin ----------------------//
+
+apply plugin: 'java-library'
+
+//Java source file encoding
+compileJava.options.encoding = 'UTF-8'
+tasks.withType(JavaCompile) {  
+    options.debug = true //emit debug info (line numbers)
+    options.debugOptions.debugLevel = "source,lines,vars" //emit debug info (line numbers)
+    options.encoding = "UTF-8"  
+}
+
+//Java compatibility settings
+sourceCompatibility = '1.8'
+targetCompatibility = '1.8'
+
+//---------------------- Variable definitions ---------------------
+def ccProjectName='ccframe'
+def ccPubVersion='2.0-SNAPSHOT' //for an official tagged release, update this version number
+
+def ccReleaseTime=new Date().format("yyyy-MM-dd_HH-mm-ss", TimeZone.getTimeZone("Asia/Shanghai"))
+def ccBuildNumber=System.getenv().BUILD_NUMBER == null ? 'debug' : System.getenv().BUILD_NUMBER?.toInteger()
+def ccSvnVersion=System.getenv().SVN_REVISION == null ? 'debug' : System.getenv().SVN_REVISION?.toInteger()
+
+//---------------------- JAR packaging plugin ----------------------//
+apply plugin: 'application'
+mainClassName = 'org.ccframe.app.App'
+jar {
+	manifestContentCharset 'utf-8'
+	metadataCharset 'utf-8'
+    manifest {
+        attributes 'Main-Class': 'org.ccframe.app.App'
+    }
+}
+
+task clearJar(type: Delete) {
+    delete "$buildDir\\libs\\lib"
+}
+
+task copyJar(type: Copy, dependsOn: 'clearJar') { //extract all dependency jars
+    from configurations.compileClasspath{
+    	exclude 'lombok*'
+    	exclude 'jsp-api*'
+    }
+    into "$buildDir\\libs\\lib"
+}
+
+/*
+bootJar {//works together with the jar server startup .bat script
+    // 例外所有的jar
+    // exclude all jars
+    // clean and copy tasks for the lib directory
+    dependsOn clearJar
+    dependsOn copyJar
+ 
+    // specify the path of the dependency jars
+    manifest {
+        attributes "Manifest-Version": 1.0,
+            'Class-Path': configurations.compileClasspath.files.collect { "lib/$it.name" }.join(' ')
+    }
+}
+//there is also https://github.com/spring-projects-experimental/spring-boot-thin-launcher, worth trying when time permits
+*/
+
+/* ----------------- buildship integration --------------- */
+apply plugin: 'java'
+apply plugin: 'eclipse'
+eclipse {
+    classpath {
+    	downloadSources = true
+        defaultOutputDir = file('/war/WEB-INF/classes')
+        file {
+		    whenMerged {
+		    	//Eclipse 2020 compatibility: the test output directory must differ from main's
+		    	entries.each {
+		    		if(it.path.startsWith('src/main')){
+		    			println it
+		    			it.output = null
+		    		}
+		    	}
+			}
+        }
+    }
+}
+
+
+//----------------------- Compile-time JAR dependency configuration ------------------------//
+configurations.all {
+    transitive = false //do not resolve transitive dependencies by default, to avoid an oversized package
+    resolutionStrategy.cacheChangingModulesFor 0, 'seconds' //check immediately instead of after 24h, because snapshots are in use
+    exclude group: 'org.apache.logging.log4j', module: 'log4j-to-slf4j' // forcibly remove conflicting jars when publishing
+    exclude group: 'ch.qos.logback' // forcibly remove conflicting jars when publishing
+    exclude group: 'pull-parser'
+}
+dependencies {
+    //included in compilation and in the published artifact.
+    implementation 'org.springframework.boot:spring-boot:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-web:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-autoconfigure:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-data-jpa:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-freemarker:2.3.2.RELEASE'
+	implementation 'org.springframework.boot:spring-boot-starter-log4j2:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-tomcat:2.3.2.RELEASE'
+    implementation ('org.springframework.boot:spring-boot-starter-data-elasticsearch:2.3.2.RELEASE') {
+    	exclude group: 'org.springframework.data', module: 'spring-data-elasticsearch'
+    }
+    implementation 'org.springframework.boot:spring-boot-starter-security:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-quartz:2.3.2.RELEASE'
+    implementation 'org.springframework.boot:spring-boot-starter-actuator:2.3.2.RELEASE'
+//	implementation 'org.springframework.boot:spring-boot-starter-data-redis-reactive:2.3.2.RELEASE'
+    
+    implementation 'org.apache.tomcat.embed:tomcat-embed-core:9.0.37'
+    implementation 'org.apache.tomcat.embed:tomcat-embed-websocket:9.0.37'
+    implementation 'javax.servlet:javax.servlet-api:4.0.1'
+
+    implementation 'commons-fileupload:commons-fileupload:1.3.3'
+    implementation 'commons-io:commons-io:2.5'
+    implementation 'commons-jxpath:commons-jxpath:1.3'
+    implementation 'commons-lang:commons-lang:2.6'
+    implementation 'org.apache.commons:commons-lang3:3.11'
+    implementation 'commons-collections:commons-collections:3.2.2'
+	implementation 'org.apache.commons:commons-collections4:4.4'
+    implementation 'commons-codec:commons-codec:1.15'
+
+    implementation 'org.springframework:spring-core:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-beans:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-web:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-webmvc:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-context:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-context-support:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-tx:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-orm:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-aop:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-jdbc:5.2.8.RELEASE'
+    implementation 'org.springframework:spring-expression:5.2.8.RELEASE'
+
+    implementation 'org.apache.poi:poi:3.14' //3.16 requires commons-collections4 (700+ KB); not upgrading for now
+    implementation 'org.apache.poi:poi-ooxml:3.14'
+    implementation 'org.apache.poi:poi-ooxml-schemas:3.14'
+    implementation 'org.apache.poi:poi-scratchpad:3.14'
+    implementation 'org.apache.xmlbeans:xmlbeans:3.1.0'
+    implementation 'stax:stax-api:1.0.1'
+
+	//authentication and authorization
+	implementation 'org.springframework.security:spring-security-config:5.3.3.RELEASE'
+	implementation 'org.springframework.security:spring-security-core:5.3.3.RELEASE'
+	implementation 'org.springframework.security:spring-security-web:5.3.3.RELEASE'
+//	implementation 'org.springframework.security:spring-security-jwt:1.1.1.RELEASE'
+
+    implementation 'org.springframework.data:spring-data-commons:2.2.13.RELEASE'
+    implementation 'org.springframework.data:spring-data-jpa:2.2.13.RELEASE'
+    implementation 'org.springframework.data:spring-data-elasticsearch:3.2.13.RELEASE' //pinned version, see https://mvnrepository.com/artifact/org.springframework.data/spring-data-releasetrain/Moore-SR13
+//	implementation 'org.springframework.data:spring-data-redis:2.2.13.RELEASE' //needed by the lettuce driver
+//	implementation 'org.springframework.data:spring-data-keyvalue:2.2.13.RELEASE' //needed by the lettuce driver
+
+    implementation 'org.apache.lucene:lucene-analyzers-common:7.7.3'
+    implementation 'org.apache.lucene:lucene-core:7.7.3'
+    implementation 'org.apache.lucene:lucene-highlighter:7.7.3'
+    implementation 'org.apache.lucene:lucene-join:7.7.3'
+    implementation 'org.apache.lucene:lucene-memory:7.7.3'
+    implementation 'org.apache.lucene:lucene-queries:7.7.3'
+    implementation 'org.apache.lucene:lucene-queryparser:7.7.3'
+    implementation 'org.apache.lucene:lucene-spatial:7.7.3'
+    implementation 'org.apache.lucene:lucene-suggest:7.7.3'
+    implementation 'org.apache.lucene:lucene-sandbox:7.7.3'
+    implementation 'org.apache.lucene:lucene-misc:7.7.3'
+    implementation 'org.apache.lucene:lucene-grouping:7.7.3'
+    implementation 'joda-time:joda-time:2.9.9'
+    
+    implementation 'org.elasticsearch:elasticsearch-core:6.8.19'
+    implementation 'org.elasticsearch:elasticsearch:6.8.19'
+    implementation 'org.elasticsearch:elasticsearch-x-content:6.8.19'
+    implementation 'org.elasticsearch.plugin:transport-netty4-client:6.8.19'
+    implementation 'org.elasticsearch.plugin:rank-eval-client:6.8.19'
+	implementation 'org.apache.commons:commons-pool2:2.8.0'
+    implementation 'org.apache.httpcomponents:httpclient:4.5.12'
+    implementation 'org.apache.httpcomponents:httpcore:4.4.13'
+    implementation 'org.apache.httpcomponents:httpcore-nio:4.4.13'
+    implementation 'org.apache.httpcomponents:httpasyncclient:4.1.4'
+    
+    //additionally required by the embedded ES server
+    implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.11.0'
+    implementation 'org.apache.logging.log4j:log4j-core:2.14.1'
+    implementation 'org.elasticsearch:elasticsearch-cli:6.8.19'
+    implementation 'com.tdunning:t-digest:3.2'
+    implementation 'org.elasticsearch:jna:5.5.0'
+    //implementation files('/lib/plugin-classloader-6.8.19.jar')
+    //implementation 'org.codelibs.elasticsearch.lib:plugin-classloader:6.8.19'
+	implementation 'org.codelibs.elasticsearch.lib:plugin-classloader:6.8.12'
+    
+    implementation 'org.hibernate:hibernate-core:5.4.19.Final'
+    implementation 'org.hibernate.javax.persistence:hibernate-jpa-2.1-api:1.0.2.Final'
+    implementation 'org.hibernate.common:hibernate-commons-annotations:5.1.0.Final'
+    implementation 'net.bytebuddy:byte-buddy-dep:1.10.14'
+    implementation 'org.ow2.asm:asm:8.0.1'
+    implementation 'org.ow2.asm:asm-commons:8.0.1'
+    implementation 'org.jboss:jandex:2.1.3.Final'
+    implementation 'jakarta.persistence:jakarta.persistence-api:2.2.3'
+
+    implementation 'org.springframework.plugin:spring-plugin-core:1.2.0.RELEASE'
+    implementation 'org.springframework.plugin:spring-plugin-metadata:1.2.0.RELEASE'
+
+//	implementation 'io.lettuce:lettuce-core:5.3.1.RELEASE'
+//	implementation 'io.netty:netty-all:4.1.50.Final'
+//	implementation 'io.projectreactor:reactor-core:3.3.6.RELEASE'
+//	implementation 'org.reactivestreams:reactive-streams:1.0.3'
+
+	implementation 'redis.clients:jedis:2.9.3' //j2cache constraint, do not upgrade
+	implementation 'io.netty:netty-all:4.1.50.Final'
+
+//	implementation 'io.springfox:springfox-swagger-ui:2.9.2' //replaced by the knife4j UI
+	implementation 'io.springfox:springfox-swagger-common:2.9.2'    
+	implementation 'io.springfox:springfox-swagger2:2.9.2'
+	implementation 'io.springfox:springfox-spring-web:2.9.2'
+    implementation 'io.swagger:swagger-models:1.5.21'
+    implementation 'io.swagger:swagger-annotations:1.5.21'
+	implementation 'io.springfox:springfox-core:2.9.2'
+	implementation 'io.springfox:springfox-spi:2.9.2'
+	implementation 'io.springfox:springfox-schema:2.9.2'
+    implementation 'org.mapstruct:mapstruct:1.2.0.Final'
+    implementation 'io.github.wilson-he:swagger2-spring-boot-starter:1.1.2'
+    implementation 'com.google.guava:failureaccess:1.0.1'
+
+	implementation ('com.github.xiaoymin:knife4j-spring-boot-autoconfigure:2.0.9') {
+		exclude group: 'com.github.xiaoymin', module: 'knife4j-spring'
+	}
+	implementation 'com.github.xiaoymin:knife4j-spring-ui:2.0.9'
+
+	implementation 'org.apache.logging.log4j:log4j-slf4j-impl:2.14.1'
+    implementation 'org.slf4j:slf4j-api:1.7.30'
+    implementation 'commons-logging:commons-logging:1.2'
+    implementation 'org.apache.logging.log4j:log4j-api:2.14.1'
+//    implementation 'org.apache.logging.log4j:log4j-core:2.14.1'
+    implementation 'org.yaml:snakeyaml:1.26'
+    implementation 'com.fasterxml.jackson.core:jackson-annotations:2.11.0'
+    implementation 'com.fasterxml.jackson.core:jackson-core:2.11.0'
+    implementation 'com.fasterxml.jackson.core:jackson-databind:2.11.0'
+    implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.11.0'
+
+    implementation 'org.freemarker:freemarker:2.3.30'
+    implementation 'com.alibaba:druid:1.1.23'
+    implementation 'mysql:mysql-connector-java:8.0.21'
+    implementation 'org.redisson:redisson:3.15.6' //Redis distributed-extension support
+    implementation 'org.jboss.marshalling:jboss-marshalling:2.0.9.Final'
+    implementation 'org.jboss.marshalling:jboss-marshalling-river:2.0.9.Final'
+
+	//HanLP lexical analyzer http://hanlp.linrunsoft.com/index.html
+	implementation 'com.hankcs:hanlp:portable-1.7.1'
+	implementation 'com.google.guava:guava:29.0-jre'
+    implementation 'com.carrotsearch:hppc:0.8.2'
+
+    implementation 'org.slf4j:jul-to-slf4j:1.7.30'
+    implementation 'org.springframework:spring-jcl:5.2.8.RELEASE'
+
+    implementation 'com.fasterxml.jackson.module:jackson-module-parameter-names:2.11.0'
+    implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.11.0'
+    implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.11.0'
+
+	implementation 'net.oschina.j2cache:j2cache-spring-boot2-starter:2.8.0-release'
+	implementation 'net.oschina.j2cache:j2cache-core:2.8.2-release'
+	implementation 'net.oschina.j2cache:j2cache-springcache:2.8.0-release'
+	implementation 'de.ruedigermoeller:fst:2.57'
+	implementation 'org.objenesis:objenesis:3.1'
+
+	implementation 'com.github.ben-manes.caffeine:caffeine:2.8.4'
+    
+    implementation 'com.alibaba:fastjson:1.2.62'
+    implementation 'com.fasterxml:classmate:1.5.1'
+    implementation 'org.javassist:javassist:3.27.0-GA'
+    implementation 'org.jboss.logging:jboss-logging:3.4.1.Final'
+    implementation 'org.apache.geronimo.specs:geronimo-jta_1.1_spec:1.1.1'
+    implementation 'org.dom4j:dom4j:2.1.3'
+    implementation 'jakarta.annotation:jakarta.annotation-api:1.3.5'
+
+    implementation 'net.coobird:thumbnailator:0.4.8'
+	implementation 'org.dbunit:dbunit:2.5.4' //do not upgrade
+	implementation 'org.apache.ant:ant:1.9.9'
+    implementation 'antlr:antlr:2.7.7'
+    
+    implementation 'io.jsonwebtoken:jjwt:0.9.1' //token scheme
+    
+    //clustered scheduled jobs
+    implementation 'org.quartz-scheduler:quartz:2.3.2'
+    implementation 'org.quartz-scheduler:quartz-jobs:2.3.2'
+    
+	implementation 'com.github.binarywang:weixin-java-miniapp:3.8.0' //WeChat mini-program QR code support
+	
+    implementation 'org.elasticsearch.client:elasticsearch-rest-high-level-client:6.8.19'
+    implementation 'org.elasticsearch.client:elasticsearch-rest-client:6.8.19'
+    implementation 'org.elasticsearch.plugin:lang-mustache-client:6.8.19'
+
+	implementation 'uk.org.lidalia:sysout-over-slf4j:1.0.2' //redirects System.out/err stack traces to slf4j for unified logging
+	//implementation 'net.logstash.logback:logstash-logback-encoder:6.4'
+	//implementation 'org.sejda.imageio:webp-imageio:0.1.6' //WebP ImageIO, for small images
+
+	implementation 'io.minio:minio:8.4.1' //MinIO distributed file storage
+	implementation 'com.squareup.okhttp3:okhttp:4.9.0' //HTTP client used by MinIO
+	implementation 'com.squareup.okio:okio:2.8.0'  //HTTP client used by MinIO
+	implementation 'org.jetbrains.kotlin:kotlin-stdlib:1.4.10'
+	
+	implementation 'org.apache.shardingsphere:sharding-jdbc-spring-boot-starter:4.1.1' //database/table sharding support
+	implementation 'org.apache.shardingsphere:shardingsphere-common:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-spring-boot-util:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-transaction-spring:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-jdbc-core:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-pluggable:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-transaction-core:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-sql92:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-engine:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-route:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-executor:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-spi:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-binder:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-rewrite-engine:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-merge:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-pluggable:4.1.1'
+	implementation 'org.apache.shardingsphere:master-slave-core-route:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-common:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-route:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-api:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-rewrite:4.1.1'
+	implementation 'org.apache.shardingsphere:encrypt-core-rewrite:4.1.1'
+	implementation 'org.apache.shardingsphere:encrypt-core-merge:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-execute:4.1.1'
+	implementation 'org.apache.shardingsphere:sharding-core-merge:4.1.1'
+	implementation 'org.apache.shardingsphere:encrypt-core-common:4.1.1'
+	implementation 'org.apache.shardingsphere:encrypt-core-api:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-spi:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-mysql:4.1.1'
+	implementation 'org.apache.shardingsphere:shardingsphere-sql-parser-statement:4.1.1'
+	implementation 'org.codehaus.groovy:groovy:2.4.5'
+	implementation 'org.antlr:antlr4-runtime:4.7.2'
+
+	//used by the QR code servlet
+	implementation 'com.google.zxing:core:3.4.1'
+	implementation 'com.google.zxing:javase:3.4.1'
+
+	//monitoring
+	implementation 'org.springframework.boot:spring-boot-actuator-autoconfigure:2.3.2.RELEASE'
+	implementation 'org.latencyutils:LatencyUtils:2.0.3'
+	implementation 'io.micrometer:micrometer-core:1.5.3'
+	implementation 'org.springframework.boot:spring-boot-actuator:2.3.2.RELEASE'
+
+	//other compile-only dependencies that are not published
+    compileOnly 'javax.servlet.jsp:jsp-api:2.2'
+    compileOnly 'org.projectlombok:lombok:1.18.28'
+	annotationProcessor 'org.projectlombok:lombok:1.18.28'
+
+    //test-only, not published
+    testImplementation 'junit:junit:4.8' //version 4.11 requires an extra package
+
+}
+
+
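The commented-out bootJar block above documents the intended packaging layout: application classes in the main jar, dependency jars extracted to build/libs/lib by copyJar, and a Class-Path manifest entry pointing at them. A minimal sketch (not part of this commit) of wiring the same layout onto the plain jar task configured above, assuming copyJar has already populated build/libs/lib:

    jar {
        dependsOn copyJar
        manifest {
            // hypothetical: mirrors the Class-Path logic from the commented bootJar block
            attributes 'Class-Path': configurations.compileClasspath.files.collect { "lib/$it.name" }.join(' ')
        }
    }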
+
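Because configurations.all sets transitive = false, every transitive dependency must be declared explicitly, which is why the dependency list above is so long. A hypothetical helper task (not in the commit) to inspect what actually ends up on the compile classpath and spot missing jars:

    task listCompileJars {
        doLast {
            // print each resolved jar name, sorted, for easy diffing
            configurations.compileClasspath.files.sort().each { println it.name }
        }
    }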

BIN
doc/1-Management/project/CCFrame移动开发架构2023.pptx


BIN
doc/4-Other/redis-6.0.3-win64/msys-2.0.dll


BIN
doc/4-Other/redis-6.0.3-win64/redis-check-aof.exe


BIN
doc/4-Other/redis-6.0.3-win64/redis-check-rdb.exe


BIN
doc/4-Other/redis-6.0.3-win64/redis-cli.exe


BIN
doc/4-Other/redis-6.0.3-win64/redis-server.exe


+ 4 - 0
doc/4-Other/redis-6.0.3-win64/redis.bat

@@ -0,0 +1,4 @@
+taskkill /f /fi "windowtitle eq redis-test"
+cd %~dp0
+del /Q dump.rdb
+start "redis-test" redis-server.exe
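redis.bat kills any previous instance by window title, deletes the old dump.rdb, and starts a fresh server. As an illustration (not part of this commit), a hypothetical Gradle task in build.gradle could wrap the same script for developers who prefer launching it from the build, assuming a Windows host and the doc/4-Other path above:

    task startLocalRedis(type: Exec) {
        // runs the bundled helper script from its own directory
        workingDir 'doc/4-Other/redis-6.0.3-win64'
        commandLine 'cmd', '/c', 'redis.bat'
    }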

+ 1811 - 0
doc/4-Other/redis-6.0.3-win64/redis.conf

@@ -0,0 +1,1811 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here.  This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings.  Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running on the same computer it
+# is running on).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 127.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+#    "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow client connection issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /tmp/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+#    equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# TLS/SSL #####################################
+
+# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration
+# directive can be used to define TLS-listening ports. To enable TLS on the
+# default port, use:
+#
+# port 0
+# tls-port 6379
+
+# Configure a X.509 certificate and private key to use for authenticating the
+# server to connected clients, masters or cluster peers.  These files should be
+# PEM formatted.
+#
+# tls-cert-file redis.crt 
+# tls-key-file redis.key
+
+# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange:
+#
+# tls-dh-params-file redis.dh
+
+# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL
+# clients and peers.  Redis requires an explicit configuration of at least one
+# of these, and will not implicitly use the system wide configuration.
+#
+# tls-ca-cert-file ca.crt
+# tls-ca-cert-dir /etc/ssl/certs
+
+# By default, clients (including replica servers) on a TLS port are required
+# to authenticate using valid client side certificates.
+#
+# It is possible to disable authentication using this directive.
+#
+# tls-auth-clients no
+
+# By default, a Redis replica does not attempt to establish a TLS connection
+# with its master.
+#
+# Use the following directive to enable TLS on replication links.
+#
+# tls-replication yes
+
+# By default, the Redis Cluster bus uses a plain TCP connection. To enable
+# TLS for the bus protocol, use the following directive:
+#
+# tls-cluster yes
+
+# Explicitly specify TLS versions to support. Allowed values are case insensitive
+# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) 
+#
+# tls-protocols TLSv1.2
+
+# Configure allowed ciphers.  See the ciphers(1ssl) manpage for more information
+# about the syntax of this string.
+#
+# Note: this configuration applies only to <= TLSv1.2.
+#
+# tls-ciphers DEFAULT:!MEDIUM
+
+# Configure allowed TLSv1.3 ciphersuites.  See the ciphers(1ssl) manpage for more
+# information about the syntax of this string, and specifically for TLSv1.3
+# ciphersuites.
+#
+# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256
+
+# When choosing a cipher, use the server's preference instead of the client
+# preference. By default, the server follows the client's preference.
+#
+# tls-prefer-server-ciphers yes
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+#   supervised no      - no supervision interaction
+#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+#   supervised auto    - detect upstart or systemd method based on
+#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+#       They do not enable continuous liveness pings back to your supervisor.
+supervised no
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY. Basically this means
+# that normally a logo is displayed only in interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo yes
+
+################################ SNAPSHOTTING  ################################
+#
+# Save the DB on disk:
+#
+#   save <seconds> <changes>
+#
+#   Will save the DB if both the given number of seconds and the given
+#   number of write operations against the DB occurred.
+#
+#   In the example below the behaviour will be to save:
+#   after 900 sec (15 min) if at least 1 key changed
+#   after 300 sec (5 min) if at least 10 keys changed
+#   after 60 sec if at least 10000 keys changed
+#
+#   Note: you can disable saving completely by commenting out all "save" lines.
+#
+#   It is also possible to remove all the previously configured save
+#   points by adding a save directive with a single empty string argument
+#   like in the following example:
+#
+   save ""
+
+#save 900 1
+#save 300 10
+#save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dumping .rdb databases?
+# By default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performance.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# Remove RDB files used by replication in instances without persistence
+# enabled. By default this option is disabled, however there are environments
+# where for regulations or other security concerns, RDB files persisted on
+# disk by masters in order to feed replicas, or stored on disk by replicas
+# in order to load them for the initial synchronization, should be deleted
+# ASAP. Note that this option ONLY WORKS in instances that have both AOF
+# and RDB persistence disabled, otherwise is completely ignored.
+#
+# An alternative (and sometimes better) way to obtain the same effect is
+# to use diskless replication on both master and replicas instances. However
+# in the case of replicas, diskless is not always an option.
+rdb-del-sync-files no
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir ./
+
+################################# REPLICATION #################################
+
+# Master-Replica replication. Use replicaof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+#   +------------------+      +---------------+
+#   |      Master      | ---> |    Replica    |
+#   | (receive writes) |      |  (exact copy) |
+#   +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+#    stop accepting writes if it appears to be not connected with at least
+#    a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+#    master if the replication link is lost for a relatively small amount of
+#    time. You may want to configure the replication backlog size (see the next
+#    sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+#    network partition replicas automatically try to reconnect to masters
+#    and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <username> <password>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
+#    an error "SYNC with master in progress" to all kinds of commands
+#    but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
+#    SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
+#    COMMAND, POST, HOST: and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization". An RDB file is transmitted from the master to the
+# replicas.
+#
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+#                 file on disk. Later the file is transferred by the parent
+#                 process to the replicas incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+#              RDB file to replica sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more replicas
+# can be queued and served with the RDB file as soon as the current child
+# producing the RDB file finishes its work. With diskless replication instead
+# once the transfer starts, new replicas arriving will be queued and a new
+# transfer will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple
+# replicas will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the replicas.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new replicas arriving, that will be queued for the next RDB transfer, so the
+# server waits a delay in order to let more replicas arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# -----------------------------------------------------------------------------
+# WARNING: RDB diskless load is experimental. Since in this setup the replica
+# does not immediately store an RDB on disk, it may cause data loss during
+# failovers. RDB diskless load + Redis modules not handling I/O reads may also
+# cause Redis to abort in case of I/O errors during the initial synchronization
+# stage with the master. Use only if you know what you are doing.
+# -----------------------------------------------------------------------------
+#
+# Replica can load the RDB it reads from the replication link directly from the
+# socket, or store the RDB to a file and read that file after it was completely
+# received from the master.
+#
+# In many cases the disk is slower than the network, and storing and loading
+# the RDB file may increase replication time (and even increase the master's
+# Copy on Write memory and replica buffers).
+# However, parsing the RDB file directly from the socket may mean that we have
+# to flush the contents of the current database before the full rdb was
+# received. For this reason we have the following options:
+#
+# "disabled"    - Don't use diskless load (store the rdb file to the disk first)
+# "on-empty-db" - Use diskless load only when it is completely safe.
+# "swapdb"      - Keep a copy of the current db contents in RAM while parsing
+#                 the data directly from the socket. note that this requires
+#                 sufficient memory, if you don't have it, you risk an OOM kill.
+repl-diskless-load disabled
+
+# Replicas send PINGs to server in a predefined interval. It's possible to
+# change this interval with the repl_ping_replica_period option. The default
+# value is 10 seconds.
+#
+# repl-ping-replica-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
+# 2) Master timeout from the point of view of replicas (data, pings).
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-replica-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the replica.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the replica socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to replicas. But this can add a delay for
+# the data to appear on the replica side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the replica side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and replicas are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# replica data when replicas are disconnected for some time, so that when a
+# replica wants to reconnect again, often a full resync is not needed, but a
+# partial resync is enough, just passing the portion of data the replica
+# missed while disconnected.
+#
+# The bigger the replication backlog, the longer the time the replica can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a replica connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected replicas for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last replica disconnected, for
+# the backlog buffer to be freed.
+#
+# Note that replicas never free the backlog for timeout, since they may be
+# promoted to masters later, and should be able to correctly "partially
+# resynchronize" with the replicas: hence they should always accumulate backlog.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The replica priority is an integer number published by Redis in the INFO
+# output. It is used by Redis Sentinel in order to select a replica to promote
+# into a master if the master is no longer working correctly.
+#
+# A replica with a low priority number is considered better for promotion, so
+# for instance if there are three replicas with priority 10, 100, 25 Sentinel
+# will pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less than or equal to M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP and address normally reported by a replica is obtained
+# in the following way:
+#
+#   IP: The address is auto detected by checking the peer address
+#   of the socket used by the replica to connect with the master.
+#
+#   Port: The port is communicated by the replica during the replication
+#   handshake, and is normally the port that the replica is using to
+#   listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the replica may be actually reachable via different IP and port
+# pairs. The following two options can be used by a replica in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# replica-announce-ip 5.5.5.5
+# replica-announce-port 1234
+
+############################### KEYS TRACKING #################################
+
+# Redis implements server assisted support for client side caching of values.
+# This is implemented using an invalidation table that remembers, using
+# 16 million slots, what clients may have certain subsets of keys. In turn
+# this is used in order to send invalidation messages to clients. To
+# understand more about the feature, check this page:
+#
+#   https://redis.io/topics/client-side-caching
+#
+# When tracking is enabled for a client, all the read only queries are assumed
+# to be cached: this will force Redis to store information in the invalidation
+# table. When keys are modified, such information is flushed away, and
+# invalidation messages are sent to the clients. However if the workload is
+# heavily dominated by reads, Redis could use more and more memory in order
+# to track the keys fetched by many clients.
+#
+# For this reason it is possible to configure a maximum fill value for the
+# invalidation table. By default it is set to 1M of keys, and once this limit
+# is reached, Redis will start to evict keys in the invalidation table
+# even if they were not modified, just to reclaim memory: this will in turn
+# force the clients to invalidate the cached values. Basically the table
+# maximum size is a trade off between the memory you want to spend server
+# side to track information about who cached what, and the ability of clients
+# to retain cached objects in memory.
+#
+# If you set the value to 0, it means there are no limits, and Redis will
+# retain as many keys as needed in the invalidation table.
+# In the "stats" INFO section, you can find information about the number of
+# keys in the invalidation table at every given moment.
+#
+# Note: when key tracking is used in broadcasting mode, no memory is used
+# in the server side so this setting is useless.
+#
+# tracking-table-max-keys 1000000
+
+################################## SECURITY ###################################
+
+# Warning: since Redis is pretty fast an outside user can try up to
+# 1 million passwords per second against a modern box. This means that you
+# should use very strong passwords, otherwise they will be very easy to break.
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can easily be a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+#   user <username> ... acl rules ...
+#
+# For example:
+#
+#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+#  on           Enable the user: it is possible to authenticate as this user.
+#  off          Disable the user: it's no longer possible to authenticate
+#               with this user, however the already authenticated connections
+#               will still work.
+#  +<command>   Allow the execution of that command
+#  -<command>   Disallow the execution of that command
+#  +@<category> Allow the execution of all the commands in such category
+#               where valid categories are @admin, @set, @sortedset, ...
+#               and so forth; see the full list in the server.c file where
+#               the Redis command table is described and defined.
+#               The special category @all means all the commands, both the ones
+#               currently present in the server and the ones that will be loaded
+#               in the future via modules.
+#  +<command>|subcommand    Allow a specific subcommand of an otherwise
+#                           disabled command. Note that this form is not
+#                           allowed as negative like -DEBUG|SEGFAULT, but
+#                           only additive starting with "+".
+#  allcommands  Alias for +@all. Note that it implies the ability to execute
+#               all the future commands loaded via the modules system.
+#  nocommands   Alias for -@all.
+#  ~<pattern>   Add a pattern of keys that can be mentioned as part of
+#               commands. For instance ~* allows all the keys. The pattern
+#               is a glob-style pattern like the one of KEYS.
+#               It is possible to specify multiple patterns.
+#  allkeys      Alias for ~*
+#  resetkeys    Flush the list of allowed keys patterns.
+#  ><password>  Add this password to the list of valid passwords for the user.
+#               For example >mypass will add "mypass" to the list.
+#               This directive clears the "nopass" flag (see later).
+#  <<password>  Remove this password from the list of valid passwords.
+#  nopass       All the set passwords of the user are removed, and the user
+#               is flagged as requiring no password: it means that every
+#               password will work against this user. If this directive is
+#               used for the default user, every new connection will be
+#               immediately authenticated with the default user without
+#               any explicit AUTH command required. Note that the "resetpass"
+#               directive will clear this condition.
+#  resetpass    Flush the list of allowed passwords. Moreover removes the
+#               "nopass" status. After "resetpass" the user has no associated
+#               passwords and there is no way to authenticate without adding
+#               some password (or setting it as "nopass" later).
+#  reset        Performs the following actions: resetpass, resetkeys, off,
+#               -@all. The user returns to the same state it had immediately
+#               after its creation.
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+#   user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+#   user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice did not yet have any commands in the set
+# of allowed commands; later all the commands were added, so the user will be
+# able to execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
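+#
+# As a sketch, the "worker" user shown earlier could equivalently be created
+# at runtime (username, password and pattern are just the example values from
+# this file):
+#
+#   ACL SETUSER worker on >ffa9203c493aa99 ~jobs:* +@list +@connection
+#   ACL LIST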
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory and consumes memory: you can
+# reclaim that memory with ACL LOG RESET, or set a maximum length below.
+acllog-max-len 128
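+
+# The log can then be inspected and cleared at runtime, for example:
+#
+#   ACL LOG 10     # show up to the 10 most recent ACL denials
+#   ACL LOG RESET  # discard all entries, reclaiming memory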
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. Its only effect is to set the password
+# for the default user. Clients will still authenticate using
+# AUTH <password> as usual, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# requirepass foobared
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all new connections, sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas is subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, which in turn fills the output
+# buffers of the replicas with DELs of the evicted keys, triggering the
+# deletion of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+#       operations, when there are no suitable keys for eviction.
+#
+#       At the date of writing these commands are: set setnx setex append
+#       incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+#       sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+#       zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+#       getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
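+#
+# As an illustrative sketch, a pure cache instance could combine the two
+# directives like this (the 2gb figure is just an example):
+#
+#   maxmemory 2gb
+#   maxmemory-policy allkeys-lru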
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune them for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# used least recently; you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover, or manually). It means
+# that the eviction of keys will be handled just by the master, which sends
+# DEL commands to the replica as keys are evicted on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is
+# usually what you want. However, if your replica is writable, or you want the
+# replica to have a different memory setting, and you are sure all the writes
+# performed to the replica are idempotent, then you may change this default
+# (but be sure to understand what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using
+# more memory than the amount set via maxmemory (there are certain buffers that
+# may be larger on the replica, or data structures may sometimes take more
+# memory and so forth). So make sure you monitor your replicas and make sure
+# they have enough memory to never hit a real out-of-memory condition before
+# the master hits the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+# Redis reclaims expired keys in two ways: upon access, when those keys are
+# found to be expired, and also in background, in what is called the
+# "active expire cycle". The key space is slowly and incrementally scanned
+# looking for expired keys to reclaim, so that it is possible to free the
+# memory of keys that are expired and will never be accessed again.
+#
+# The default effort of the expire cycle will try to avoid having more than
+# ten percent of expired keys still in memory, and will try to avoid consuming
+# more than 25% of total memory and adding latency to the system. However
+# it is possible to increase the expire "effort" that is normally set to
+# "1", to a greater value, up to the value "10". At its maximum value the
+# system will use more CPU, longer cycles (and technically may introduce
+# more latency), and will tolerate fewer already expired keys still present
+# in the system. It's a tradeoff between memory, CPU and latency.
+#
+# active-expire-effort 1
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+#    in order to make room for new data, without going over the specified
+#    memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+#    EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+#    already exist. For example the RENAME command may delete the old key
+#    content when it is replaced with another one. Similarly SUNIONSTORE
+#    or SORT with STORE option may delete existing keys. The SET command
+#    itself removes any old content of the specified key in order to replace
+#    it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+#    its master, the content of the whole database is removed in order to
+#    load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# as if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way, as if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# It is also possible, for cases where replacing the user code DEL calls
+# with UNLINK calls is not easy, to modify the default behavior of the DEL
+# command to act exactly like UNLINK, using the following configuration
+# directive:
+
+lazyfree-lazy-user-del no
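+
+# For example (the key name is a placeholder), deleting a huge list the
+# blocking and the non-blocking way:
+#
+#   DEL biglist      # may block the server for seconds on very large values
+#   UNLINK biglist   # returns immediately, memory is reclaimed in background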
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis client socket reads and writes
+# in different I/O threads. Since writing in particular is slow, Redis users
+# normally use pipelining in order to speed up Redis performance per
+# core, and spawn multiple instances in order to scale further. Using I/O
+# threads it is possible to speed up Redis by up to two times without
+# resorting to pipelining or sharding of the instance.
+#
+# By default threading is disabled; we suggest enabling it only on machines
+# that have at least 4 cores, leaving at least one spare core.
+# Using more than 8 threads is unlikely to help much. We also recommend using
+# threaded I/O only if you actually have performance problems, with Redis
+# instances being able to use a quite big percentage of CPU time, otherwise
+# there is no point in using this feature.
+#
+# So for instance if you have a four core box, try to use 2 or 3 I/O
+# threads; if you have 8 cores, try to use 6 threads. In order to
+# enable I/O threads use the following configuration directive:
+#
+# io-threads 4
+#
+# Setting io-threads to 1 will just use the main thread as usual.
+# When I/O threads are enabled, we only use threads for writes, that is
+# to thread the write(2) syscall and transfer the client buffers to the
+# socket. However it is also possible to enable threading of reads and
+# protocol parsing using the following configuration directive, by setting
+# it to yes:
+#
+# io-threads-do-reads no
+#
+# Usually threading reads doesn't help much.
+#
+# NOTE 1: This configuration directive cannot be changed at runtime via
+# CONFIG SET. Also, this feature currently does not work when SSL is
+# enabled.
+#
+# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make
+# sure you also run the benchmark itself in threaded mode, using the
+# --threads option to match the number of Redis threads, otherwise you'll not
+# be able to notice the improvements.
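+#
+# For instance, with "io-threads 4" enabled above, a matching benchmark run
+# could look like this (parameters are illustrative):
+#
+#   redis-benchmark --threads 4 -t set,get -n 1000000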
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data to disk
+# instead of waiting for more data in the output buffer. Some OSes will really
+# flush data to disk, some others will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# For more details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file, implicitly calling
+# BGREWRITEAOF, when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the base size by the specified percentage, the rewrite is
+# triggered. You also need to specify a minimal size for the AOF file to be
+# rewritten: this is useful to avoid rewriting the AOF file even if the
+# percentage increase is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
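+
+# Worked example with the defaults above: if the AOF measured 80mb after the
+# last rewrite, the next automatic BGREWRITEAOF triggers once the file grows
+# past 160mb (100% bigger than the base size), which is also above the 64mb
+# minimum.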
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise, if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
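+
+# When the option is set to no, fixing a truncated file before restarting
+# looks like this (the file name matches the appendfilename directive above):
+#
+#   redis-check-aof --fix appendonly.aof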
+
+# When rewriting the AOF file, Redis is able to use an RDB preamble in the
+# AOF file for faster rewrites and recoveries. When this option is turned
+# on the rewritten AOF file is composed of two different stanzas:
+#
+#   [RDB file][AOF tail]
+#
+# When loading, Redis recognizes that the AOF file starts with the "REDIS"
+# string, loads the prefixed RDB file, and then continues loading the AOF
+# tail.
+aof-use-rdb-preamble yes
+
+################################ LUA SCRIPTING  ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called any write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
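+
+# For example (illustrative): once a script runs past the limit, normal
+# clients start receiving BUSY errors; from another connection you can then
+# issue:
+#
+#   SCRIPT KILL       # works only if the script has not written yet
+#   SHUTDOWN NOSAVE   # last resort if the script already issued writes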
+
+################################ REDIS CLUSTER  ###############################
+
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node, enable cluster support by uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the number of milliseconds a node must be unreachable
+# for it to be considered in a failure state.
+# Most other internal time limits are multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A replica of a failing master will avoid starting a failover if its data
+# looks too old.
+#
+# There is no simple way for a replica to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple replicas able to failover, they exchange messages
+#    in order to try to give an advantage to the replica with the best
+#    replication offset (more data from the master processed).
+#    Replicas will try to get their rank by offset, and apply to the start
+#    of the failover a delay proportional to their rank.
+#
+# 2) Every single replica computes the time of the last interaction with
+#    its master. This can be the last ping or command received (if the master
+#    is still in the "connected" state), or the time that elapsed since the
+#    disconnection with the master (if the replication link is currently down).
+#    If the last interaction is too old, the replica will not try to failover
+#    at all.
+#
+# The point "2" can be tuned by user. Specifically a replica will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+#   (node-timeout * replica-validity-factor) + repl-ping-replica-period
+#
+# So for example if node-timeout is 30 seconds, and the replica-validity-factor
+# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
+# replica will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large replica-validity-factor may allow replicas with too-old data to fail
+# over a master, while a too-small value may prevent the cluster from being
+# able to elect a replica at all.
+#
+# For maximum availability, it is possible to set the replica-validity-factor
+# to a value of 0, which means that replicas will always try to fail over the
+# master regardless of the last time they interacted with it.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-replica-validity-factor 10
+
+# Cluster replicas are able to migrate to orphaned masters, that is, masters
+# that are left without working replicas. This improves the cluster's ability
+# to resist failures, as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working replicas.
+#
+# Replicas migrate to orphaned masters only if there are still at least a
+# given number of other working replicas for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a replica
+# will migrate only if there is at least 1 other working replica for its master
+# and so forth. It usually reflects the number of replicas you want for every
+# master in your cluster.
+#
+# Default is 1 (replicas migrate only if their masters remain with at least
+# one replica). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least one hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# is no longer covered) the whole cluster eventually becomes unavailable.
+# It automatically becomes available again as soon as all the slots are covered.
+#
+# However sometimes you want the subset of the cluster which is working
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# This option, when set to yes, prevents replicas from trying to fail over
+# their master during master failures. However the master can still perform a
+# manual failover, if forced to do so.
+#
+# This is useful in different scenarios, especially in the case of multiple
+# data center operations, where we want one side to never be promoted except
+# in the case of a total DC failure.
+#
+# cluster-replica-no-failover no
+
+# This option, when set to yes, allows nodes to serve read traffic while the
+# cluster is in a down state, as long as it believes it owns the slots.
+#
+# This is useful for two cases. The first case is for when an application
+# doesn't require consistency of data during node failures or network partitions.
+# One example of this is a cache, where as long as the node has the data it
+# should be able to serve it.
+#
+# The second use case is for configurations that don't meet the recommended
+# three shards but want to enable cluster mode and scale later. A
+# master outage in a 1 or 2 shard configuration causes a read/write outage to
+# the entire cluster without this option set; with it set there is only a
+# write outage. Without a quorum of masters, slot ownership will not change
+# automatically.
+#
+# cluster-allow-reads-when-down no
+
+# In order to set up your cluster make sure to read the documentation
+# available at the http://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support  ########################
+
+# In certain deployments, Redis Cluster node address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following three options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client port, and cluster message
+# bus port. The information is then published in the header of the bus packets
+# so that other nodes will be able to correctly map the address of the node
+# publishing the information.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# client port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-port 6379
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# the execution time, in microseconds, a command must exceed in order
+# to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
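+
+# The slow log can then be inspected and trimmed at runtime, for example:
+#
+#   SLOWLOG GET 10    # show the 10 most recent slow entries
+#   SLOWLOG LEN       # number of entries currently stored
+#   SLOWLOG RESET     # discard all entries, reclaiming memory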
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user, who can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal to or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact that, while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
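+
+# For example (the 100 ms threshold is illustrative):
+#
+#   CONFIG SET latency-monitor-threshold 100
+#   LATENCY LATEST    # most recent latency spikes per event
+#   LATENCY DOCTOR    # human readable analysis and advice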
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  t     Stream commands
+#  m     Key-miss events (Note: It is not included in the 'A' class)
+#  A     Alias for g$lshzxet, so that the "AKE" string means all the events
+#        (Except key-miss events which are excluded from 'A' due to their
+#         unique nature).
+#
+#  The "notify-keyspace-events" takes as argument a string that is composed
+#  of zero or multiple characters. The empty string means that notifications
+#  are disabled.
+#
+#  Example: to enable list and generic events, from the point of view of the
+#           event name, use:
+#
+#  notify-keyspace-events Elg
+#
+#  Example 2: to get the stream of expired keys, subscribing to the channel
+#             named __keyevent@0__:expired, use:
+#
+#  notify-keyspace-events Ex
+#
+#  By default all notifications are disabled because most users don't need
+#  this feature and the feature has some overhead. Note that if you don't
+#  specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
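+
+# A client interested in those events would then subscribe to the matching
+# channels, for example (a sketch; database 0 is assumed):
+#
+#   PSUBSCRIBE __keyevent@0__:expired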
+
+############################### GOPHER SERVER #################################
+
+# Redis contains an implementation of the Gopher protocol, as specified in
+# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt).
+#
+# The Gopher protocol was very popular in the late '90s. It is an alternative
+# to the web, and the implementation, both server and client side, is so
+# simple that the Redis server needs just 100 lines of code to implement
+# this support.
+#
+# What do you do with Gopher nowadays? Well Gopher never *really* died, and
+# lately there is a movement to resurrect Gopher's more hierarchical content,
+# composed of just plain text documents. Some want a simpler internet, others
+# believe that the mainstream internet became too controlled, and it's cool
+# to create an alternative space for people that want a bit of fresh air.
+#
+# Anyway, for the 10th birthday of Redis, we gave it the Gopher protocol
+# as a gift.
+#
+# --- HOW IT WORKS ---
+#
+# The Redis Gopher support uses the inline protocol of Redis, and specifically
+# two kinds of inline requests that were anyway illegal: an empty request
+# or any request that starts with "/" (there are no Redis commands starting
+# with such a slash). Normal RESP2/RESP3 requests are completely out of the
+# path of the Gopher protocol implementation and are served as usual.
+#
+# If you open a connection to Redis when Gopher is enabled and send it
+# a string like "/foo", if there is a key named "/foo" it is served via the
+# Gopher protocol.
+#
+# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher
+# parlance), you likely need a script like the following:
+#
+#   https://github.com/antirez/gopher2redis
+#
+# --- SECURITY WARNING ---
+#
+# If you plan to put Redis on the internet in a publicly accessible address
+# to serve Gopher pages, MAKE SURE TO SET A PASSWORD for the instance.
+# Once a password is set:
+#
+#   1. The Gopher server (when enabled, which is not the default) will still
+#      serve content via Gopher.
+#   2. However other commands cannot be called before the client
+#      authenticates.
+#
+# So use the 'requirepass' option to protect your instance.
+#
+# To enable Gopher support uncomment the following line and set
+# the option from no (the default) to yes.
+#
+# gopher-enabled no
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression.  The head and tail of the list
+# are always uncompressed for fast push/pop operations.  Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+#    going from either the head or tail"
+#    So: [head]->node->node->...->node->[tail]
+#    [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+#    2 here means: don't compress head or head->next or tail->prev or tail,
+#    but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
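+
+# To verify which encoding a given key currently uses (the key name is a
+# placeholder), you can ask the server directly:
+#
+#   OBJECT ENCODING mykey
+#
+# which reports e.g. "ziplist" or "intset" below the thresholds above, and
+# "hashtable", "quicklist" or "skiplist" once a limit is exceeded.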
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16-byte header. When a HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Streams macro node max size / items. The stream data structure is a radix
+# tree of big nodes that encode multiple items inside. Using this configuration
+# it is possible to configure how big a single node can be in bytes, and the
+# maximum number of items it may contain before switching to a new node when
+# appending new stream entries. If any of the following settings are set to
+# zero, the limit is ignored, so for instance it is possible to set just a
+# max entries limit by setting max-bytes to 0 and max-entries to the desired
+# value.
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica  -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously exceeds
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbounded memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or the like.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid processing too many clients for each background task invocation,
+# which would cause latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily rise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves RDB file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see the maxmemory setting) can be tuned. However it is a
+# good idea to start with the default settings and only change them after
+# investigating how to improve performance and how the keys' LFU values change
+# over time, which can be inspected via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key: its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
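+#
+# Worked example: with the default lfu-log-factor of 10 and an old counter
+# value of 100, P = 1/(100*10+1) ~= 0.001, so on average only about one
+# access in a thousand increments the counter at that level.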
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+#   redis-benchmark -n 1000000 incr foo
+#   redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if its value
+# is <= 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing it to reclaim memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature,
+# implemented by Oran Agra for Redis 4.0, this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically, when the fragmentation is over a certain level (see the
+# configuration options below), Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time will release the
+# old copies of the data. This process, repeated incrementally for all the
+# keys, will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+#    to use the copy of Jemalloc we ship with the source code of Redis.
+#    This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+#    issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+#    needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine-tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
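+#
+# To judge whether defragmentation is needed at all, one quick check is
+# (a sketch; the 1.5 threshold is a common rule of thumb, not an official
+# guideline):
+#
+#   redis-cli INFO memory | grep mem_fragmentation_ratio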
+
+# Enable active defragmentation
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both in order to pin different Redis threads to different
+# CPUs, and in order to make sure that multiple Redis instances running
+# on the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via the Redis configuration directly, both on Linux
+# and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11

+ 172 - 0
gradlew

@@ -0,0 +1,172 @@
+#!/usr/bin/env sh
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+    ls=`ls -ld "$PRG"`
+    link=`expr "$ls" : '.*-> \(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS=""
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+    echo "$*"
+}
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+  NONSTOP* )
+    nonstop=true
+    ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin, switch paths to Windows format before running java
+if $cygwin ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=$((i+1))
+    done
+    case $i in
+        (0) set -- ;;
+        (1) set -- "$args0" ;;
+        (2) set -- "$args0" "$args1" ;;
+        (3) set -- "$args0" "$args1" "$args2" ;;
+        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Escape application args
+save () {
+    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+    echo " "
+}
+APP_ARGS=$(save "$@")
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
+if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
+  cd "$(dirname "$0")"
+fi
+
+exec "$JAVACMD" "$@"

+ 84 - 0
gradlew.bat

@@ -0,0 +1,84 @@
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS=
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega

+ 10 - 0
settings.gradle

@@ -0,0 +1,10 @@
+/*
+ * This file was generated by the Gradle 'init' task.
+ *
+ * The settings file is used to specify which projects to include in your build.
+ * 
+ * Detailed information about configuring a multi-project build in Gradle can be found
+ * in the user guide at https://docs.gradle.org/4.10.2/userguide/multi_project_builds.html
+ */
+
+rootProject.name = 'ccframe-server'

+ 148 - 0
src/main/java/net/coobird/thumbnailator/filters/Border.java

@@ -0,0 +1,148 @@
+package net.coobird.thumbnailator.filters;
+
+import java.awt.Color;
+import java.awt.Graphics;
+import java.awt.image.BufferedImage;
+
+import net.coobird.thumbnailator.filters.ImageFilter;
+
+/**
+ * An {@link ImageFilter} which will enclose an image into a specified
+ * border.
+ * <p>
+ * The fill color used for the enclosing image can be specified, along with
+ * whether or not to crop an image if it is larger than the enclosing image.
+ * 
+ * @author marcoreni
+ * @since 0.4.9
+ *
+ */
+public class Border implements ImageFilter
+{
+	/**
+	 * The size of the border left.
+	 */
+	private final int borderLeft;
+	
+	/**
+	 * The size of the border right.
+	 */
+	private final int borderRight;
+	
+	/**
+	 * The size of the border top.
+	 */
+	private final int borderTop;
+	
+	/**
+	 * The size of the border bottom.
+	 */
+	private final int borderBottom;
+	
+	/**
+	 * The fill color for the background.
+	 */
+	private final Color fillColor;
+	
+	/**
+	 * Instantiates a {@code Border} filter.
+	 * <p>
+	 * No fill color will be applied to the filtered image. If the image to
+	 * filter does not have a transparency channel, the image will be filled
+	 * black.
+	 * <p>
+	 * Crops the enclosed image if the enclosing image is smaller.
+	 * 
+	 * @param size 			The size of the border.
+	 */
+	public Border(int size)
+	{
+		this(size, size, size, size, null);
+	}
+	
+	/**
+	 * Instantiates a {@code Border} filter.
+	 * <p>
+	 * No fill color will be applied to the filtered image. If the image to
+	 * filter does not have a transparency channel, the image will be filled
+	 * black.
+	 * 
+	 * @param horizontalSize		Size of the left and right border.
+	 * @param verticalSize 			Size of the top and bottom border.
+	 */
+	public Border(int horizontalSize, int verticalSize)
+	{
+		this(verticalSize, horizontalSize, verticalSize, horizontalSize, null);
+	}
+	
+	/**
+	 * Instantiates a {@code Border} filter.
+	 * <p>
+	 * 
+	 * @param size			The size of the border.
+	 * @param fillColor		The color used to fill portions of the image that are
+	 * 						not covered by the enclosed image. Transparent portions
+	 * 						of the image will be filled with the specified color
+	 * 						as well.
+	 */
+	public Border(int size, Color fillColor)
+	{
+		this(size, size, size, size, fillColor);
+	}
+	
+	/**
+	 * Instantiates a {@code Border} filter.
+	 * 
+	 * @param borderTop     The size of the border top.
+	 * @param borderRight   The size of the border right.
+	 * @param borderBottom  The size of the border bottom.
+	 * @param borderLeft    The size of the border left.
+	 * @param fillColor		The color used to fill portions of the image that are
+	 * 						not covered by the enclosed image. Transparent portions
+	 * 						of the image will be filled with the specified color
+	 * 						as well.
+	 */
+	public Border(int borderTop, int borderRight, int borderBottom, int borderLeft, Color fillColor)
+	{
+		super();
+		this.borderTop = borderTop;
+		this.borderRight = borderRight;
+		this.borderBottom = borderBottom;
+		this.borderLeft = borderLeft;
+
+		this.fillColor = fillColor;
+	}
+
+	public BufferedImage apply(BufferedImage img)
+	{
+		int imageFinalWidth = img.getWidth() + borderLeft + borderRight;
+		int imageFinalHeight = img.getHeight() + borderTop + borderBottom;
+		int containedImageWidth = img.getWidth();
+		int containedImageHeight = img.getHeight();
+		
+		BufferedImage finalImage =
+			new BufferedImage(imageFinalWidth, imageFinalHeight, img.getType());
+		
+		Graphics g = finalImage.getGraphics();
+		
+		if (fillColor == null && !img.getColorModel().hasAlpha())
+		{
+			/*
+			 * Fulfills the specification to use a black fill color for images
+			 * w/o alpha, if the fill color isn't specified.
+			 */
+			g.setColor(Color.black);
+			g.fillRect(0, 0, imageFinalWidth, imageFinalHeight);
+		}
+		else if (fillColor != null)
+		{
+			g.setColor(fillColor);
+			g.fillRect(0, 0, imageFinalWidth, imageFinalHeight);
+		}
+		
+		g.drawImage(img, borderLeft, borderTop, containedImageWidth, containedImageHeight, null);
+		g.dispose();
+		
+		return finalImage;
+	}
+}
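
A minimal usage sketch of the Border filter above (not part of the commit; the file names and the ImageIO round-trip are illustrative):

    import java.awt.Color;
    import java.awt.image.BufferedImage;
    import java.io.File;
    import javax.imageio.ImageIO;

    // Apply a 10px white border around an image and write the result back out.
    BufferedImage src = ImageIO.read(new File("photo.jpg"));
    BufferedImage out = new Border(10, Color.WHITE).apply(src);
    ImageIO.write(out, "jpg", new File("photo-bordered.jpg"));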

+ 280 - 0
src/main/java/net/oschina/j2cache/redis/RedisGenericCache.java

@@ -0,0 +1,280 @@
+/**
+ * Copyright (c) 2015-2017, Winter Lau (javayou@gmail.com).
+ * <p>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package net.oschina.j2cache.redis;
+
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+import net.oschina.j2cache.CacheException;
+import net.oschina.j2cache.Level2Cache;
+import redis.clients.jedis.BinaryJedis;
+import redis.clients.jedis.BinaryJedisCommands;
+import redis.clients.jedis.MultiKeyBinaryCommands;
+import redis.clients.jedis.MultiKeyCommands;
+import redis.clients.jedis.ScanParams;
+import redis.clients.jedis.ScanResult;
+
+/**
+ * Redis cache operation wrapper; implements multiple cache regions based on region + _key.
+ * @author Winter Lau(javayou@gmail.com)
+ */
+public class RedisGenericCache implements Level2Cache {
+
+    private final static Logger log = LoggerFactory.getLogger(RedisGenericCache.class);
+
+    private String namespace;
+    private String region;
+    private RedisClient client;
+    private int scanCount;
+
+    /**
+     * Cache constructor.
+     * @param namespace namespace, used to avoid _key collisions between multiple instances
+     * @param region name of the cache region
+     * @param client cache client interface
+     * @param scanCount batch-size hint passed to each SCAN step
+     */
+    public RedisGenericCache(String namespace, String region, RedisClient client, int scanCount) {
+        if (region == null || region.isEmpty())
+            region = "_"; // 缺省region
+
+        this.client = client;
+        this.namespace = namespace;
+        this.region = _regionName(region);
+        this.scanCount = scanCount;
+    }
+
+    @Override
+    public boolean supportTTL() {
+        return true;
+    }
+
+    /**
+     * Adds an optional namespace level to the region to keep the key structure clearer,
+     * and to let multiple J2Cache instances of small applications share one redis database.
+     *
+     * @param region
+     * @return
+     */
+    private String _regionName(String region) {
+        if (namespace != null && !namespace.trim().isEmpty())
+            region = namespace + ":" + region;
+        return region;
+    }
+
+    private byte[] _key(String key) {
+        try {
+            return (this.region + ":" + key).getBytes("utf-8");
+        } catch (UnsupportedEncodingException e) {
+            return (this.region + ":" + key).getBytes();
+        }
+    }
+
+    @Override
+    public byte[] getBytes(String key) {
+        try {
+            return client.get().get(_key(key));
+        } finally {
+            client.release();
+        }
+    }
+
+    @Override
+    public List<byte[]> getBytes(Collection<String> keys) {
+        try {
+            BinaryJedisCommands cmd = client.get();
+            if(cmd instanceof MultiKeyBinaryCommands) {
+                byte[][] bytes = keys.stream().map(k -> _key(k)).toArray(byte[][]::new);
+                return ((MultiKeyBinaryCommands)cmd).mget(bytes);
+            }
+            return keys.stream().map(k -> getBytes(k)).collect(Collectors.toList());
+        } finally {
+            client.release();
+        }
+    }
+
+    @Override
+    public void setBytes(String key, byte[] bytes) {
+        try {
+            client.get().set(_key(key), bytes);
+        } finally {
+            client.release();
+        }
+    }
+
+    @Override
+    public void setBytes(Map<String,byte[]> bytes) {
+        try {
+            BinaryJedisCommands cmd = client.get();
+            if(cmd instanceof MultiKeyBinaryCommands) {
+                byte[][] data = new byte[bytes.size() * 2][];
+                int idx = 0;
+                for(String key : bytes.keySet()){
+                    data[idx++] = _key(key);
+                    data[idx++] = bytes.get(key);
+                }
+                ((MultiKeyBinaryCommands)cmd).mset(data);
+            }
+            else
+                bytes.forEach((k,v) -> setBytes(k, v));
+        } finally {
+            client.release();
+        }
+    }
+
+    @Override
+    public void setBytes(String key, byte[] bytes, long timeToLiveInSeconds) {
+        if (timeToLiveInSeconds <= 0) {
+            log.debug(String.format("Invalid timeToLiveInSeconds value : %d , skipped it.", timeToLiveInSeconds));
+            setBytes(key, bytes);
+        }
+        else {
+            try {
+                client.get().setex(_key(key), (int) timeToLiveInSeconds, bytes);
+            } finally {
+                client.release();
+            }
+        }
+    }
+
+    @Override
+    public void setBytes(Map<String,byte[]> bytes, long timeToLiveInSeconds) {
+        try {
+            /* To support TTL, the batch write command cannot be used */
+            /*
+            BinaryJedisCommands cmd = client.get();
+            if(cmd instanceof MultiKeyBinaryCommands) {
+                byte[][] data = new byte[bytes.size() * 2][];
+                int idx = 0;
+                for(String key : bytes.keySet()){
+                    data[idx++] = _key(key);
+                    data[idx++] = bytes.get(key);
+                }
+                ((MultiKeyBinaryCommands)cmd).mset(data);
+            }
+            else
+            */
+
+            if (timeToLiveInSeconds <= 0) {
+                log.debug(String.format("Invalid timeToLiveInSeconds value : %d , skipped it.", timeToLiveInSeconds));
+                setBytes(bytes);
+            }
+            else
+                bytes.forEach((k,v) -> setBytes(k, v, timeToLiveInSeconds));
+        } finally {
+            client.release();
+        }
+    }
+
+    @Override
+    public boolean exists(String key) {
+        try {
+            return client.get().exists(_key(key));
+        } finally {
+            client.release();
+        }
+    }
+
+    /**
+     * 1. Production redis services are very likely to disable or rename the KEYS command;
+     * 2. KEYS is so inefficient that it can easily bring redis down.
+     * SCAN is therefore used instead of KEYS, improving both availability and performance.
+     */
+    @Override
+    public Collection<String> keys() {
+        try {
+            BinaryJedisCommands cmd = client.get();
+            if (cmd instanceof MultiKeyCommands) {
+            	Collection<String> keys = keys(cmd);
+            	
+                return keys.stream().map(k -> k.substring(this.region.length()+1)).collect(Collectors.toList());
+            }
+        } finally {
+            client.release();
+        }
+        throw new CacheException("keys() not implemented in Redis Generic Mode");
+    }
+
+    private Collection<String> keys(BinaryJedisCommands cmd) { //JIM:已解决缓存清除不了的BUG,j2cache下一个版本已修复,更新后可以移除覆盖
+        Collection<String> keys = new ArrayList<>();
+        String cursor = "0";
+        ScanParams scanParams = new ScanParams();
+        scanParams.match(this.region + ":*");
+        scanParams.count(scanCount); // not the size of the result set, but a hint for how many keys each SCAN step examines
+        ScanResult<String> scan = ((MultiKeyCommands) cmd).scan(cursor, scanParams);
+        while (null != scan.getStringCursor()) {
+            keys.addAll(scan.getResult()); // keys matched by this SCAN step
+            if (!StringUtils.equals(cursor, scan.getStringCursor())) { // keep scanning with the new cursor until all matching keys are collected
+                scan = ((MultiKeyCommands) cmd).scan(scan.getStringCursor(), scanParams);
+                continue;
+            } else {
+                break;
+            }
+        }
+        return keys;
+    }
+
+    @Override
+    public void evict(String...keys) {
+        try {
+            BinaryJedisCommands cmd = client.get();
+            if (cmd instanceof BinaryJedis) {
+                byte[][] bytes = Arrays.stream(keys).map(k -> _key(k)).toArray(byte[][]::new);
+                ((BinaryJedis)cmd).del(bytes);
+            }
+            else {
+                for (String key : keys)
+                    cmd.del(_key(key));
+            }
+        } finally {
+            client.release();
+        }
+    }
+
+    /**
+     * Uses the SCAN command instead of KEYS.
+     */
+    @Override
+    public void clear() {
+        try {
+            BinaryJedisCommands cmd = client.get();
+            if (cmd instanceof MultiKeyCommands) {
+            	Collection<String> keysCollection = keys(cmd);
+            	if (keysCollection.size() > 0) {
+                    // fixes "ERR Protocol error: invalid multibulk length": at most 16k keys can be sent in a single command
+                    for(List<String> partList: Lists.partition(new ArrayList<String>(keysCollection), 16*1024)) {
+                        String[] keys = partList.stream().toArray(String[]::new);
+                        ((MultiKeyCommands) cmd).del(keys);
+                    }
+            	}
+            }
+            else
+                throw new CacheException("clear() not implemented in Redis Generic Mode");
+        } finally {
+            client.release();
+        }
+    }
+}
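
The cursor loop in keys(...) above can be exercised in isolation. A minimal sketch, assuming a reachable Redis at localhost:6379 and the same Jedis API generation used in this file (the region name is illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.ScanParams;
    import redis.clients.jedis.ScanResult;

    try (Jedis jedis = new Jedis("localhost", 6379)) {
        ScanParams params = new ScanParams().match("myRegion:*").count(100);
        String cursor = ScanParams.SCAN_POINTER_START; // "0"
        List<String> keys = new ArrayList<>();
        do {
            ScanResult<String> page = jedis.scan(cursor, params);
            keys.addAll(page.getResult());   // keys matched by this step
            cursor = page.getStringCursor(); // next cursor; "0" marks the end of the scan
        } while (!ScanParams.SCAN_POINTER_START.equals(cursor));
        System.out.println(keys);
    }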

+ 101 - 0
src/main/java/org/ccframe/app/App.java

@@ -0,0 +1,101 @@
+package org.ccframe.app;
+
+import java.io.InputStream;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TimeZone;
+
+import javax.annotation.PostConstruct;
+
+import org.apache.commons.jxpath.JXPathContext;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.ccframe.config.GlobalEx;
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration;
+import org.springframework.boot.autoconfigure.data.redis.RedisReactiveAutoConfiguration;
+import org.springframework.boot.autoconfigure.security.servlet.UserDetailsServiceAutoConfiguration;
+import org.springframework.boot.builder.SpringApplicationBuilder;
+import org.springframework.boot.context.ApplicationPidFileWriter;
+import org.springframework.boot.web.servlet.ServletComponentScan;
+import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.scheduling.annotation.EnableAsync;
+import org.springframework.scheduling.annotation.EnableScheduling;
+//import org.springframework.scheduling.annotation.EnableScheduling;
+import org.springframework.web.multipart.MultipartResolver;
+import org.springframework.web.multipart.commons.CommonsMultipartResolver;
+import org.yaml.snakeyaml.Yaml;
+
+import uk.org.lidalia.sysoutslf4j.context.SysOutOverSLF4J;
+
+@SpringBootApplication(exclude = {RedisAutoConfiguration.class, RedisReactiveAutoConfiguration.class, UserDetailsServiceAutoConfiguration.class})
+@EnableAsync
+@EnableScheduling
+@ServletComponentScan({"org.ccframe.commons.filter","org.ccframe.commons.servlet"})
+public class App extends SpringBootServletInitializer{
+
+	private static final String SYS_START_STR = "系统启动模式:";
+
+	private static Date contextInitStartTime;
+
+	public static void main(String[] args) {
+        System.setProperty("user.timezone", GlobalEx.TIMEZONE);
+		TimeZone.setDefault(TimeZone.getTimeZone(GlobalEx.TIMEZONE));
+
+		checkSysIntMode();
+		SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
+
+		SpringApplication springApplication = new SpringApplication(App.class);
+		springApplication.addListeners(new ApplicationPidFileWriter());
+		springApplication.run(args);
+	}
+
+	@Override // needed so the Spring Boot project can be packaged as a WAR
+    protected SpringApplicationBuilder configure(SpringApplicationBuilder builder) {
+        return builder.sources(this.getClass());
+    }
+	
+	public static Date getContextInitStartTime() {
+		return contextInitStartTime;
+	}
+
+	@Bean(name="multipartResolver")
+	public MultipartResolver multipartResolver(){
+		return new CommonsMultipartResolver(); // file upload support
+	}
+	
+	private static boolean qserver;
+
+    private static void checkSysIntMode() {
+    	String forceInitMode = System.getProperty("forceInitMode", null); //自动发布或正式时,通过环境变量-D强制指定是重置还是不处理
+    	String modeStr = null;
+    	if(forceInitMode != null){
+    		modeStr = forceInitMode;
+    	}else{
+    		Yaml yaml = new Yaml();
+    		try (InputStream in = App.class.getResourceAsStream("/application.yml")){
+    			Map<String, Object> properties = yaml.loadAs(in, HashMap.class);
+    			JXPathContext jc = JXPathContext.newContext(properties);
+    			String activeProfile = jc.getValue("spring/profiles/active").toString();
+    			try(InputStream profileIn = App.class.getResourceAsStream("/application-" + activeProfile + ".yml")){
+        			properties = yaml.loadAs(profileIn, HashMap.class);
+        			jc = JXPathContext.newContext(properties);
+        			modeStr = jc.getValue("app/init/mode").toString();
+        			qserver = Boolean.valueOf(jc.getValue("app/init/qserver").toString());
+    			}
+    		}catch (Exception e) {
+    			throw new RuntimeException(e);
+    		}
+    	}
+    	System.out.println(SYS_START_STR + (qserver ? ("【qserver节点】" + ("create".equals(modeStr) ? "数据重新创建模式": "标准模式")): "【普通节点】标准模式") + "..."); // this output is not captured by log4j; everything after this point is logged
+    	contextInitStartTime = new Date();
+	}
+
+	@PostConstruct
+	void setDefaultTimezone() {
+    	TimeZone.setDefault(TimeZone.getTimeZone(GlobalEx.TIMEZONE));
+	}
+}
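
The YAML-plus-JXPath lookup inside checkSysIntMode() can be illustrated standalone. A minimal sketch with a hypothetical inline YAML document in place of application.yml:

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.commons.jxpath.JXPathContext;
    import org.yaml.snakeyaml.Yaml;

    // SnakeYAML parses the document into nested maps; JXPath then navigates
    // the map hierarchy with slash-separated paths, exactly as done above.
    String yamlText = "app:\n  init:\n    mode: create\n    qserver: true\n";
    Map<String, Object> properties = new Yaml().loadAs(
            new ByteArrayInputStream(yamlText.getBytes(StandardCharsets.UTF_8)), HashMap.class);
    JXPathContext jc = JXPathContext.newContext(properties);
    System.out.println(jc.getValue("app/init/mode"));    // create
    System.out.println(jc.getValue("app/init/qserver")); // true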

+ 28 - 0
src/main/java/org/ccframe/app/CoreConfig.java

@@ -0,0 +1,28 @@
+package org.ccframe.app;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
+
+@Configuration
+@EnableJpaRepositories(basePackages = "org.ccframe.subsys.*.repository")
+@ComponentScan({
+	"org.ccframe.commons.helper",
+	"org.ccframe.subsys.*.service",
+	"org.ccframe.subsys.*.queue",
+	"org.ccframe.subsys.*.processor",
+	"org.ccframe.subsys.*.controller",
+	"org.ccframe.sdk.*.controller",
+	"org.ccframe.thirdadapter.sms"
+})
+public class CoreConfig {
+
+	@Bean
+	public Map<String, String > replacementMap(){
+		return new HashMap<String, String>(); //预置数据字典转换
+	}
+}

+ 96 - 0
src/main/java/org/ccframe/app/DataSourceConfig.java

@@ -0,0 +1,96 @@
+package org.ccframe.app;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.sql.DataSource;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.AutoConfigureAfter;
+import org.springframework.boot.autoconfigure.AutoConfigureBefore;
+import org.springframework.boot.autoconfigure.quartz.QuartzAutoConfiguration;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.web.servlet.ServletRegistrationBean;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.DependsOn;
+import org.springframework.context.annotation.Primary;
+import org.springframework.orm.jpa.JpaVendorAdapter;
+import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
+import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter;
+import org.springframework.transaction.PlatformTransactionManager;
+
+import com.alibaba.druid.pool.DruidDataSource;
+import com.alibaba.druid.support.http.StatViewServlet;
+
+import org.apache.shardingsphere.shardingjdbc.spring.boot.SpringBootConfiguration;
+import org.ccframe.commons.mvc.CcframeTransactionManager;
+
+@Configuration
+@AutoConfigureAfter(SpringBootConfiguration.class) // pulls in sharding (ShardingSphere)
+@AutoConfigureBefore(QuartzAutoConfiguration.class)
+public class DataSourceConfig {
+	
+	@Value("${app.druid.pass:tcxh}")
+	private String druidPass;
+	
+	@Value("${app.druid.accessHost:127.0.0.1}") // 连接池UI默认只允许本地调试使用
+	private String accessHost;
+
+	@Value("${app.init.mode:none}")
+	private String initMode;
+	
+	@Bean
+	@ConfigurationProperties(prefix = "spring.datasource")
+	public JpaVendorAdapter jpaVendorAdapter() {
+		return new HibernateJpaVendorAdapter();
+	}
+
+	@Bean
+	public LocalContainerEntityManagerFactoryBean entityManagerFactory(DataSource dataSource, JpaVendorAdapter jpaVendorAdapter)
+	{
+	    LocalContainerEntityManagerFactoryBean bean=new LocalContainerEntityManagerFactoryBean();      
+	    bean.setDataSource(dataSource);
+	    bean.setPackagesToScan(new String[] {"org.ccframe.subsys.*.domain.entity"});
+	    bean.setJpaVendorAdapter(jpaVendorAdapter);
+	    bean.getJpaPropertyMap().put("hibernate.hbm2ddl.auto",initMode);
+	    return bean;
+	}
+
+//	@Primary
+//	@Bean
+//	public DataSource dataSource(DataSource shardingDataSource) {
+//		return shardingDataSource;
+//	}
+
+/*	@Primary
+	@ConfigurationProperties(prefix = "spring.datasource")
+	@Bean(initMethod = "init", destroyMethod = "close")
+	public DataSource dataSource() {
+		return new DruidDataSource();
+	}
+*/
+	@Bean
+	public ServletRegistrationBean<StatViewServlet> statViewServlet() {
+		ServletRegistrationBean<StatViewServlet> bean = new ServletRegistrationBean<StatViewServlet>(
+				new StatViewServlet(), "/druid/*");
+		Map<String, String> initParams = new HashMap<>();
+
+		initParams.put("loginUsername", "druid");
+		initParams.put("loginPassword", druidPass);
+		initParams.put("resetEnable", "true");
+		initParams.put("allow", accessHost);
+		
+		bean.setInitParameters(initParams);
+		return bean;
+	}
+	
+	@Bean
+	//@DependsOn(value = "dataSource")
+	public PlatformTransactionManager transactionManager(DataSource dataSource) {
+		CcframeTransactionManager ccframeTransactionManager = new CcframeTransactionManager();
+		ccframeTransactionManager.setDataSource(dataSource);
+		return ccframeTransactionManager;     
+	}
+}

+ 36 - 0
src/main/java/org/ccframe/app/ElasticsearchConfig.java

@@ -0,0 +1,36 @@
+package org.ccframe.app;
+
+import org.elasticsearch.client.RestHighLevelClient;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.DependsOn;
+import org.springframework.data.elasticsearch.client.ClientConfiguration;
+import org.springframework.data.elasticsearch.client.RestClients;
+import org.springframework.data.elasticsearch.core.ElasticsearchRestTemplate;
+import org.springframework.data.elasticsearch.repository.config.EnableElasticsearchRepositories;
+
+import org.ccframe.config.GlobalEx;
+
+@Configuration
+@EnableElasticsearchRepositories(basePackages = "org.ccframe.subsys.*.search")
+@ConditionalOnProperty(value = "app.search.embedded", havingValue = "false")
+public class ElasticsearchConfig{
+
+	@Value("${spring.data.elasticsearch.cluster-nodes:}")
+	private String clusterNodes;
+
+	@Bean(destroyMethod = "close")
+	public RestHighLevelClient restHighLevelClient() {
+		ClientConfiguration clientConfiguration = ClientConfiguration.builder().connectedTo(clusterNodes.split(GlobalEx.DEFAULT_TEXT_SPLIT_CHAR)).withConnectTimeout(15000).withSocketTimeout(15000).build();
+		return RestClients.create(clientConfiguration).rest();
+    }
+	
+    @Bean
+	@DependsOn(value = "restHighLevelClient")
+    public ElasticsearchRestTemplate elasticsearchTemplate(RestHighLevelClient elasticsearchClient) {
+        return new ElasticsearchRestTemplate(elasticsearchClient);
+    }
+}

+ 63 - 0
src/main/java/org/ccframe/app/ElasticsearchEmbeddedConfig.java

@@ -0,0 +1,63 @@
+package org.ccframe.app;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.node.NodeClient;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.DependsOn;
+import org.springframework.data.elasticsearch.client.ClientConfiguration;
+import org.springframework.data.elasticsearch.client.NodeClientFactoryBean;
+import org.springframework.data.elasticsearch.client.RestClients;
+import org.springframework.data.elasticsearch.core.ElasticsearchTemplate;
+import org.springframework.data.elasticsearch.repository.config.EnableElasticsearchRepositories;
+import org.ccframe.commons.util.ESPathUtil;
+import org.ccframe.config.GlobalEx;
+
+/**
+ * Embedded mode still opens ports 9200 and 9300, so if several applications share one machine, consider running them in docker to avoid port conflicts.
+ * @author JIM
+ *
+ */
+@Configuration
+@EnableElasticsearchRepositories(basePackages = "org.ccframe.subsys.*.search")
+@ConditionalOnProperty(value = "app.search.embedded", havingValue = "true")
+public class ElasticsearchEmbeddedConfig{
+	
+	static{
+        System.setProperty("es.set.netty.runtime.available.processors", "false");
+    }
+	
+	@Value("${spring.data.elasticsearch.cluster-nodes:}")
+	private String clusterNodes;
+
+	@Value("${app.productName}")
+	private String productName;
+	
+    @Value("${app.init.mode:none}")
+    public void setInitMode(String initMode){
+    	String forceInitMode = System.getProperty("forceInitMode", null); //自动发布时,通过环境变量-D强制指定是重置还是不处理
+    	this.initMode = (forceInitMode == null ? initMode : forceInitMode);
+    }
+    
+	private String initMode;
+	
+	@Bean
+	public NodeClientFactoryBean client() {
+		NodeClientFactoryBean nodeClientFactoryBean = new NodeClientFactoryBean(true);
+		nodeClientFactoryBean.setClusterName("create".equals(initMode) ? ESPathUtil.cleanUpCluster(productName) : productName);
+		nodeClientFactoryBean.setPathConfiguration("elasticsearch.yml");
+		nodeClientFactoryBean.setPathData(ESPathUtil.getDataDir());
+		nodeClientFactoryBean.setPathHome(ESPathUtil.getHomeDir());
+		nodeClientFactoryBean.setEnableHttp(false); // has no effect; the port is opened anyway
+		return nodeClientFactoryBean;
+    }
+	
+    @Bean
+	@DependsOn(value = "client")
+    public ElasticsearchTemplate elasticsearchTemplate(Client elasticsearchClient) {
+        return new ElasticsearchTemplate(elasticsearchClient);
+    }
+}

+ 20 - 0
src/main/java/org/ccframe/app/ErrorPageConfig.java

@@ -0,0 +1,20 @@
+package org.ccframe.app;
+
+import org.springframework.boot.web.server.ConfigurableWebServerFactory;
+import org.springframework.boot.web.server.ErrorPage;
+import org.springframework.boot.web.server.WebServerFactoryCustomizer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpStatus;
+
+@Configuration
+public class ErrorPageConfig {
+    @Bean
+    public WebServerFactoryCustomizer<ConfigurableWebServerFactory> webServerFactoryCustomizer() {
+        return (factory -> {
+            ErrorPage errorPage404 = new ErrorPage(HttpStatus.NOT_FOUND, "/404/index.html");
+            factory.addErrorPages(errorPage404);
+        });
+    }
+ 
+}

+ 96 - 0
src/main/java/org/ccframe/app/GlobalExceptionConfig.java

@@ -0,0 +1,96 @@
+package org.ccframe.app;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.context.MessageSource;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.support.AbstractMessageSource;
+import org.springframework.http.MediaType;
+import org.springframework.orm.ObjectOptimisticLockingFailureException;
+import org.springframework.web.servlet.HandlerExceptionResolver;
+import org.springframework.web.servlet.ModelAndView;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+
+import io.jsonwebtoken.JwtException;
+
+import org.ccframe.commons.filter.CcRequestLoggingFilter;
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.util.BusinessException;
+import org.ccframe.config.ResGlobalEx;
+import org.ccframe.subsys.core.dto.ErrorObjectResp;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Configuration
+@Slf4j
+public class GlobalExceptionConfig implements HandlerExceptionResolver {
+	
+	@Autowired
+	private Map<String, String> uriExceptionStatusMap = new HashMap<String, String>(); //必要时将异常装填转换为200,在输出异常对象里包含实际的错误
+
+	@Override
+	public ModelAndView resolveException(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) {
+		ErrorObjectResp error = null;
+		if (ex instanceof BusinessException) {
+			BusinessException busException = (BusinessException) ex;
+			String errorText = SpringContextHelper.getBean(
+					MessageSource.class).getMessage(
+					busException.getCode(), busException.getArgs(),
+					request.getLocale());
+			error = new ErrorObjectResp(busException.getCode(), errorText,
+					busException.getViewData());
+			if(busException.isUseSimpleLog()){
+				log.error(errorText);
+			}else{
+				log.error(errorText, ex);
+			}
+		} else if(ex instanceof ObjectOptimisticLockingFailureException) { // optimistic-lock conflict: "the system hiccuped, please try again later"
+			error = new ErrorObjectResp(ResGlobalEx.ERRORS_OPTIMISTIC_LOCK_EXCEPTION, "系统开小差了,请稍后再试" , null);
+			log.error(ex.getMessage(), ex);
+		} else {
+			error = new ErrorObjectResp(ResGlobalEx.ERRORS_EXCEPTION, ex.getMessage() == null ? ex.toString() : ex.getMessage(), null);
+			log.error(ex.getMessage(), ex);
+		}
+		//TODO map permission exceptions such as AccessDeniedException to 403
+		CcRequestLoggingFilter.pendingLog(); // now the server can log the request that caused the error 😂
+		if(ex instanceof JwtException) { // not-logged-in exceptions are always 403
+			response.setStatus(HttpServletResponse.SC_FORBIDDEN);
+		}else {
+			response.setStatus(checkStatus(request));
+		}
+		
+		response.setContentType(MediaType.APPLICATION_JSON_VALUE);
+        response.setCharacterEncoding("UTF-8");
+        response.setHeader("Cache-Control", "no-cache, must-revalidate");  
+        try {
+        	JSONObject errorResult = new JSONObject();
+        	errorResult.put("errorObjectResp", error);
+            response.getWriter().write(JSON.toJSONString(errorResult));
+        } catch (IOException e) {
+            log.error("与客户端通讯异常:" + e.getMessage(), e);
+            e.printStackTrace();
+        }
+		
+		return new ModelAndView();
+	}
+
+	private int checkStatus(HttpServletRequest request) {
+		String requestUri = request.getRequestURI();
+		for(Entry<String, String> entry: uriExceptionStatusMap.entrySet()) {
+			if(requestUri.startsWith(entry.getKey())) {
+				return Integer.parseInt(entry.getValue());
+			}
+		}
+		return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
+	}
+	
+}

+ 43 - 0
src/main/java/org/ccframe/app/LocaleConfig.java

@@ -0,0 +1,43 @@
+package org.ccframe.app;
+
+import java.util.Locale;
+
+import org.springframework.boot.autoconfigure.web.servlet.WebMvcProperties.LocaleResolver;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.web.servlet.LocaleContextResolver;
+import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
+import org.springframework.web.servlet.i18n.LocaleChangeInterceptor;
+import org.springframework.web.servlet.i18n.SessionLocaleResolver;
+
+/**
+ * Internationalization (locale) configuration.
+ */
+@Configuration
+public class LocaleConfig {
+
+    /**
+     * Default resolver; locale sets the default language.
+     */
+    @Bean
+    public LocaleContextResolver localeResolver() {
+        SessionLocaleResolver localeResolver = new SessionLocaleResolver();
+        localeResolver.setDefaultLocale(Locale.CHINESE);
+        return localeResolver;
+    }
+
+    /**
+     * Default interceptor; lang is the request parameter that switches the language.
+     */
+    @Bean
+    public WebMvcConfigurer localeInterceptor() {
+        return new WebMvcConfigurer() {
+            @Override
+            public void addInterceptors(InterceptorRegistry registry) {
+                LocaleChangeInterceptor localeInterceptor = new LocaleChangeInterceptor();
+                localeInterceptor.setParamName("lang");
+                registry.addInterceptor(localeInterceptor);
+            }
+        };
+    }
+}

+ 18 - 0
src/main/java/org/ccframe/app/MethodSecurityConfig.java

@@ -0,0 +1,18 @@
+package org.ccframe.app;
+
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.access.method.MethodSecurityMetadataSource;
+import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;
+import org.springframework.security.config.annotation.method.configuration.GlobalMethodSecurityConfiguration;
+
+import org.ccframe.commons.auth.RoleAuthAnnotationSecurityMetadataSource;
+
+@Configuration
+@EnableGlobalMethodSecurity(securedEnabled = true)
+public class MethodSecurityConfig extends GlobalMethodSecurityConfiguration {
+
+    @Override
+    protected MethodSecurityMetadataSource customMethodSecurityMetadataSource() {
+        return new RoleAuthAnnotationSecurityMetadataSource(); // maps RoleAuth annotations onto Security rules
+    }
+}

+ 30 - 0
src/main/java/org/ccframe/app/MinioConfig.java

@@ -0,0 +1,30 @@
+package org.ccframe.app;
+
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import io.minio.MinioClient;
+
+@Configuration
+@ConditionalOnProperty(value = "app.minio.enabled", havingValue = "true")
+public class MinioConfig {
+
+	@Value("${app.minio.url:127.0.0.1}") // MinIO地址,调试时为本地
+	private String url;
+
+	@Value("${app.minio.userName:minioadmin}")
+	private String userName;
+
+	@Value("${app.minio.userPass:minioadmin}")
+	private String userPass;
+
+	@Bean
+    public MinioClient minioClient(){
+        MinioClient.Builder builder = MinioClient.builder();
+        builder.endpoint(url);
+        builder.credentials(userName, userPass);
+        return builder.build();
+    }
+}

+ 12 - 0
src/main/java/org/ccframe/app/ProcessorConfig.java

@@ -0,0 +1,12 @@
+package org.ccframe.app;
+
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+@ConditionalOnProperty(value = "app.init.qserver", havingValue = "true")
+@ComponentScan({"org.ccframe.subsys.*.job"}) //如果设置为队列和定时节点,才会跑Job
+public class ProcessorConfig {
+	
+}

+ 27 - 0
src/main/java/org/ccframe/app/QuartzConfiguration.java

@@ -0,0 +1,27 @@
+package org.ccframe.app;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.quartz.SchedulerFactoryBeanCustomizer;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import javax.sql.DataSource;
+
+import org.ccframe.commons.quartz.QuartzJobFactory;
+
+ 
+/**
+ * Quartz Configuration.
+ * 
+ * @since 1.0.0, 23 November 2017
+ * @author <a href="https://waylau.com">Way Lau</a> 
+ */
+@Configuration
+public class QuartzConfiguration {
+ 
+	@Bean
+    public QuartzJobFactory jobFactory() {
+		QuartzJobFactory factory = new QuartzJobFactory();
+		return factory;
+    }
+}

+ 73 - 0
src/main/java/org/ccframe/app/RedissonConfig.java

@@ -0,0 +1,73 @@
+package org.ccframe.app;
+
+import org.ccframe.commons.queue.QueueServer;
+import org.redisson.Redisson;
+import org.redisson.api.RedissonClient;
+import org.redisson.config.Config;
+import org.redisson.config.ReadMode;
+import org.redisson.config.SentinelServersConfig;
+import org.redisson.config.SingleServerConfig;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+//TODO support multiple configuration modes, following https://blog.csdn.net/unclecoco/article/details/99412915
+@Configuration
+public class RedissonConfig {
+
+	@Value("${app.cache.redisHosts}")
+	private String[] redisHosts;
+
+    @Value("${app.init.qserver}")
+	private boolean qserver;
+
+    @Value("${app.redisson.minimumIdleSize: 24}")
+	private int minimumIdleSize;
+
+    @Value("${app.redisson.connectionPoolSize: 64}")
+	private int connectionPoolSize;
+
+    @Value("${app.redisson.database: 0}")
+	private int database;
+
+    @Bean
+    public RedissonClient redissonClient() { //TODO auto-detect single vs. multi node support, with unified yml configuration
+        Config config = new Config();
+        if(redisHosts.length > 1) { //哨兵模式
+//        	ClusterServersConfig clusterServersConfig = config.useClusterServers().setScanInterval(2000);
+//        	for(String redisHost: redisHosts) {
+//        		clusterServersConfig.addNodeAddress("redis://" + redisHost);
+//        	}
+        	SentinelServersConfig sentinelServersConfig = config.useSentinelServers()
+    			.setMasterConnectionMinimumIdleSize(minimumIdleSize)
+    			.setSlaveConnectionMinimumIdleSize(minimumIdleSize)
+    			.setMasterConnectionPoolSize(connectionPoolSize)
+    			.setSlaveConnectionPoolSize(connectionPoolSize)
+    			.setReadMode(ReadMode.MASTER_SLAVE)
+    			.setMasterName("mymaster")
+    			.setCheckSentinelsList(false); // by default at least 2 sentinels are required; this lets a minimal HA setup run on a single node
+        	for(String redisHost: redisHosts) {
+        		sentinelServersConfig.addSentinelAddress("redis://" + redisHost);
+	    	}
+        	if(qserver) {
+        		sentinelServersConfig.setSubscriptionConnectionMinimumIdleSize(QueueServer.getSubscriptionCount());
+        	}
+    		sentinelServersConfig.setPingConnectionInterval(1000);
+    		sentinelServersConfig.setDatabase(database);
+        }else {
+        	SingleServerConfig singleServerConfig = config.useSingleServer()
+    			.setConnectionMinimumIdleSize(minimumIdleSize)
+    			.setConnectionPoolSize(connectionPoolSize)
+    			.setAddress("redis://" + redisHosts[0])
+    			.setPingConnectionInterval(1000);
+        	if(qserver) {
+        		singleServerConfig.setSubscriptionConnectionMinimumIdleSize(QueueServer.getSubscriptionCount());
+        	}
+        	singleServerConfig.setPingConnectionInterval(1000);
+        	singleServerConfig.setDatabase(database);
+        }
+        RedissonClient redisson = Redisson.create(config);
+
+        return redisson;
+    }
+}
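
A minimal usage sketch of the resulting RedissonClient bean (not part of the commit; assumes a reachable Redis, and the lock name is illustrative):

    import org.redisson.api.RLock;

    // Distributed lock shared by every node configured against the same Redis.
    RLock lock = redissonClient.getLock("demo-lock");
    lock.lock();
    try {
        // critical section, executed by at most one node at a time
    } finally {
        lock.unlock();
    }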

+ 50 - 0
src/main/java/org/ccframe/app/SecurityConfigurer.java

@@ -0,0 +1,50 @@
+package org.ccframe.app;
+
+import org.ccframe.commons.auth.JwtHeadFilter;
+import org.ccframe.config.CoreMapping;
+import org.ccframe.config.GlobalEx;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.annotation.web.builders.HttpSecurity;
+import org.springframework.security.config.annotation.web.builders.WebSecurity;
+import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
+import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
+import org.springframework.security.config.http.SessionCreationPolicy;
+import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
+
+@Configuration
+@EnableWebSecurity
+public class SecurityConfigurer extends WebSecurityConfigurerAdapter {
+
+	@Override
+	protected void configure(HttpSecurity http) throws Exception {
+		http
+			.authorizeRequests()
+				.antMatchers("/admin/**").authenticated()
+				.antMatchers("/api/**").authenticated()
+			.and()
+				.addFilterAfter(new JwtHeadFilter(), UsernamePasswordAuthenticationFilter.class)
+				.sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS)
+			.and()
+				.csrf().disable();
+	}
+
+	/** Paths excluded from security filtering. */
+    @Override
+    public void configure(WebSecurity web) throws Exception {
+    	web.ignoring().antMatchers(
+			"/" + GlobalEx.COMMON_STATICS_DIR + "/**",
+			"/" + GlobalEx.UPLOAD_DIR + "/**",
+			"/" + GlobalEx.SMALL_PICT_DIR + "/**",
+			"/" + GlobalEx.STATICS_DIR + "/**",
+			"/" + GlobalEx.TEMP_DIR + "/**",
+			"/" + GlobalEx.MANAGE_DIR + "/**",
+			"/admin/mainFrame/doLogin",
+			"/admin/cacheIndex/" + CoreMapping.CACHE_INDEX_REBUILD_SINGLE, //特别通道,允许通过URL直接刷新缓存索引无需检查header,同时刷新缓存,需要检查clientJwtAlg,只允许单刷表
+			"/api/common/**",
+			"/druid/**", //druid面板
+			"/v2/**", "/webjars/**", //swagger-api
+			"/*.txt","/*.ico","/*.html" //根目录下验证文件等
+		);
+    }
+}

+ 112 - 0
src/main/java/org/ccframe/app/WebMvcConfigurer.java

@@ -0,0 +1,112 @@
+package org.ccframe.app;
+
+import java.io.File;
+import java.nio.charset.Charset;
+import java.util.List;
+
+import org.ccframe.commons.mvc.CcUserArgumentResolver;
+import org.ccframe.config.GlobalEx;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Primary;
+import org.springframework.http.converter.HttpMessageConverter;
+import org.springframework.http.converter.StringHttpMessageConverter;
+import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter;
+import org.springframework.web.method.support.HandlerMethodArgumentResolver;
+import org.springframework.web.servlet.config.annotation.CorsRegistry;
+import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry;
+import org.springframework.web.servlet.config.annotation.WebMvcConfigurationSupport;
+
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import lombok.extern.log4j.Log4j;
+import lombok.extern.slf4j.Slf4j;
+
+@Configuration
+@Slf4j
+public class WebMvcConfigurer extends WebMvcConfigurationSupport  {
+	
+	private static final String FILE_PREFIX = "file:";
+	
+	@Value("${app.debug.swagger}")
+	private boolean swagger;
+	
+	/**
+	 * Automatic argument injection for adminUser and apiUser.
+	 */
+	@Override
+    protected void addArgumentResolvers(List<HandlerMethodArgumentResolver> argumentResolvers) {
+        argumentResolvers.add(new CcUserArgumentResolver());
+        super.addArgumentResolvers(argumentResolvers);
+    }
+
+	@Override
+    public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
+        converters.add(new StringHttpMessageConverter(Charset.forName("UTF-8")));
+        // The default converters must be loaded here as well, otherwise a maddening bug appears
+        // for which no published fix seems to exist (Baidu, Google, the major forums). Try removing this and see.
+        addDefaultHttpMessageConverters(converters);
+    }
+
+	@Override
+    public void addResourceHandlers(ResourceHandlerRegistry registry) {
+        //registry.addResourceHandler("/**").addResourceLocations("classpath:/static/");
+
+		innerAddResourceHandlers(registry, GlobalEx.COMMON_STATICS_DIR);
+		innerAddResourceHandlers(registry, GlobalEx.UPLOAD_DIR);
+		innerAddResourceHandlers(registry, GlobalEx.SMALL_PICT_DIR);
+		innerAddResourceHandlers(registry, GlobalEx.STATICS_DIR);
+		innerAddResourceHandlers(registry, GlobalEx.TEMP_DIR);
+		innerAddResourceHandlers(registry, GlobalEx.MANAGE_DIR);
+
+        registry.addResourceHandler("/*.txt","/*.ico","/*.html").addResourceLocations(FILE_PERFIX + GlobalEx.APP_BASE_DIR + File.separator);
+
+        registry.addResourceHandler("doc.html").addResourceLocations("classpath:/META-INF/resources/"); //knife4j的地址
+        if(swagger) {
+            log.info("Inited Api document at URI /doc.html");
+        }
+        registry.addResourceHandler("/webjars/**").addResourceLocations("classpath:/META-INF/resources/webjars/");
+	}
+
+	/**
+	 * Configures response mapping: null-valued fields are omitted.
+	 * @return
+	 */
+	@Bean
+    @Primary
+    public MappingJackson2HttpMessageConverter customJackson2HttpMessageConverter(){
+        MappingJackson2HttpMessageConverter convertor= new MappingJackson2HttpMessageConverter();
+        ObjectMapper mapper = new ObjectMapper();
+        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
+        mapper.setSerializationInclusion(Include.NON_NULL);
+        convertor.setObjectMapper(mapper);
+        return convertor;
+    }
+
+	private void innerAddResourceHandlers(ResourceHandlerRegistry registry, String dirName) {
+        registry.addResourceHandler("/" + dirName + "/**").addResourceLocations(FILE_PERFIX + GlobalEx.APP_BASE_DIR + File.separator + dirName + File.separator);
+	}
+	
+	@Value("${app.debug.cors:false}")
+	private boolean cors;
+
+	@Override
+	protected void addCorsMappings(CorsRegistry registry) {
+		if(cors) {
+			registry.addMapping("/**")
+	        .allowedOrigins("*")
+	        .allowedMethods("GET", "HEAD", "POST","PUT", "DELETE", "OPTIONS")
+	        .allowedHeaders("*")
+	        .exposedHeaders("access-control-allow-headers",
+	                "access-control-allow-methods",
+	                "access-control-allow-origin",
+	                "access-control-max-age",
+	                "X-Frame-Options")
+	        .allowCredentials(false).maxAge(3600);
+		}
+		super.addCorsMappings(registry);
+	}
+}
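
The Include.NON_NULL setting in customJackson2HttpMessageConverter() is what drops null fields from responses. A minimal standalone sketch with a hypothetical DTO:

    import com.fasterxml.jackson.annotation.JsonInclude.Include;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class Demo {
        public Integer id = 1;
        public String nickname; // stays null, so it is omitted from the JSON
    }

    ObjectMapper mapper = new ObjectMapper().setSerializationInclusion(Include.NON_NULL);
    System.out.println(mapper.writeValueAsString(new Demo())); // {"id":1}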

+ 7 - 0
src/main/java/org/ccframe/commons/auth/HasRoleSet.java

@@ -0,0 +1,7 @@
+package org.ccframe.commons.auth;
+
+import java.util.Set;
+
+public interface HasRoleSet {
+	Set<Integer> getRoleIds(); //只有强制刷新后角色ID才改变
+}

+ 35 - 0
src/main/java/org/ccframe/commons/auth/JwtAuthenticationToken.java

@@ -0,0 +1,35 @@
+package org.ccframe.commons.auth;
+
+import java.util.Collection;
+
+import org.springframework.security.authentication.AbstractAuthenticationToken;
+import org.springframework.security.core.GrantedAuthority;
+
+public class JwtAuthenticationToken extends AbstractAuthenticationToken {
+
+	private static final long serialVersionUID = 534354716662455704L;
+
+    private final TokenUser principal;
+
+    /**
+     * Builds the authentication token from the granted authorities.
+     * @param tokenUser
+     * @param authorities
+     */
+    public JwtAuthenticationToken(TokenUser tokenUser, Collection<? extends GrantedAuthority> authorities) {
+        super(authorities);
+        this.principal = tokenUser;
+        setAuthenticated(true);
+    }
+
+	@Override
+	public Object getCredentials() {
+		return null;
+	}
+
+	@Override
+	public Object getPrincipal() {
+		return principal;
+	}
+
+}

+ 79 - 0
src/main/java/org/ccframe/commons/auth/JwtHeadFilter.java

@@ -0,0 +1,79 @@
+package org.ccframe.commons.auth;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.stream.Collectors;
+
+import javax.servlet.FilterChain;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.springframework.http.MediaType;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.authority.SimpleGrantedAuthority;
+import org.springframework.security.core.context.SecurityContextHolder;
+import org.springframework.security.web.authentication.WebAuthenticationDetails;
+import org.springframework.web.filter.OncePerRequestFilter;
+
+import com.alibaba.fastjson.JSON;
+import org.ccframe.commons.util.JsonUtil;
+import org.ccframe.commons.util.JwtUtil;
+import org.ccframe.config.GlobalEx;
+import org.ccframe.config.ResGlobalEx;
+import org.ccframe.subsys.core.dto.ErrorObjectResp;
+
+import io.jsonwebtoken.JwtException;
+
+public class JwtHeadFilter extends OncePerRequestFilter {
+
+	@Override
+	protected void doFilterInternal(HttpServletRequest request, HttpServletResponse response, FilterChain filterChain)
+			throws ServletException, IOException {
+
+		String uri = request.getRequestURI();
+		String token = null;
+		if(uri.startsWith("/" + GlobalEx.ADMIN_URI_PERFIX)) { //后端用户
+			token = request.getHeader(GlobalEx.ADMIN_TOKEN);
+		}else if(uri.startsWith("/" + GlobalEx.API_URI_PERFIX)) { //前端用户
+			token = request.getHeader(GlobalEx.API_TOKEN);
+		}else {
+            filterChain.doFilter(request, response);
+            return;
+		}
+        if (token==null || token.isEmpty()){
+            filterChain.doFilter(request,response);
+            return;
+        }
+        try {
+        	TokenUser tokenUser = JwtUtil.decodeData(token, TokenUser.class);
+
+        	// authentication: simply resolve the ROLE set into authorities
+        	JwtAuthenticationToken jwtAuthenticationToken =  new JwtAuthenticationToken(
+        		tokenUser,
+//    			new HashSet<GrantedAuthority>()
+        		tokenUser.getRoleIds().stream().map(item->new SimpleGrantedAuthority("ROLE_"+item)).collect(Collectors.toList())
+    		);
+        	
+        	jwtAuthenticationToken.setDetails(new WebAuthenticationDetails(request));
+            SecurityContextHolder.getContext().setAuthentication(jwtAuthenticationToken); // put the authentication object into the security context
+            filterChain.doFilter(request,response);
+        }catch(IllegalArgumentException|JwtException e) {
+        	response.setStatus(HttpServletResponse.SC_FORBIDDEN);
+    		response.setContentType(MediaType.APPLICATION_JSON_VALUE);
+            response.setCharacterEncoding("UTF-8");
+            response.setHeader("Cache-Control", "no-cache, must-revalidate");  
+            try {
+                response.getWriter().write(JSON.toJSONString(new ErrorObjectResp(ResGlobalEx.ERRORS_EXCEPTION, e.getMessage() , null)));
+                logger.error(e.getMessage());
+                response.flushBuffer();
+            } catch (IOException ioe) {
+            	logger.error("与客户端通讯异常:" + e.getMessage(), e);
+                e.printStackTrace();
+            }
+		}
+	}
+
+}

+ 79 - 0
src/main/java/org/ccframe/commons/auth/JwtUser.java

@@ -0,0 +1,79 @@
+package org.ccframe.commons.auth;
+
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.authority.SimpleGrantedAuthority;
+import org.springframework.security.core.userdetails.UserDetails;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * @author niXueChao
+ * @date 2019/4/8 11:29.
+ */
+public class JwtUser implements UserDetails {
+
+	private static final long serialVersionUID = 4375649169544078019L;
+	
+	private String username;
+    private String password;
+    private List<SimpleGrantedAuthority> authorities;
+
+    public JwtUser() {
+    }
+
+    public JwtUser(String username, String password, String ... roles) {
+        this.username = username;
+        this.password = password;
+        this.authorities= Arrays.stream(roles).map(SimpleGrantedAuthority::new).collect(Collectors.toList());
+    }
+
+    @Override
+    public Collection<? extends GrantedAuthority> getAuthorities() {
+        return this.authorities;
+    }
+
+    @Override
+    public String getPassword() {
+        return this.password;
+    }
+
+    @Override
+    public String getUsername() {
+        return this.username;
+    }
+
+    @Override
+    public boolean isAccountNonExpired() {
+        return true;
+    }
+
+    @Override
+    public boolean isAccountNonLocked() {
+        return true;
+    }
+
+    @Override
+    public boolean isCredentialsNonExpired() {
+        return true;
+    }
+
+    @Override
+    public boolean isEnabled() {
+        return true;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public void setAuthorities(List<SimpleGrantedAuthority> authorities) {
+        this.authorities = authorities;
+    }
+}

+ 18 - 0
src/main/java/org/ccframe/commons/auth/RoleAuth.java

@@ -0,0 +1,18 @@
+package org.ccframe.commons.auth;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import org.ccframe.subsys.core.domain.code.RoleCodeEnum;
+
+@Target({ ElementType.METHOD, ElementType.TYPE })
+@Retention(RetentionPolicy.RUNTIME)
+@Inherited
+@Documented
+public @interface RoleAuth {
+	RoleCodeEnum[] value();
+}
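A usage sketch; the endpoint and the SUPER_ADMIN constant are placeholders, since RoleCodeEnum's actual values do not appear in this part of the commit:

    @RestController
    public class ExampleAdminController {

        // only callers whose token carries the matching ROLE_ authority get through
        @RoleAuth(RoleCodeEnum.SUPER_ADMIN) // hypothetical enum constant
        @PostMapping("/admin/user/disable")
        public void disableUser(@RequestParam Integer userId) {
            // business logic elided
        }
    }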

+ 89 - 0
src/main/java/org/ccframe/commons/auth/RoleAuthAnnotationSecurityMetadataSource.java

@@ -0,0 +1,89 @@
+package org.ccframe.commons.auth;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.springframework.core.annotation.AnnotationUtils;
+import org.springframework.security.access.ConfigAttribute;
+import org.springframework.security.access.SecurityConfig;
+import org.springframework.security.access.method.AbstractMethodSecurityMetadataSource;
+import org.springframework.util.ClassUtils;
+
+import org.ccframe.subsys.core.controller.MainFrameController;
+import org.ccframe.subsys.core.domain.code.RoleCodeEnum;
+
+public class RoleAuthAnnotationSecurityMetadataSource extends AbstractMethodSecurityMetadataSource {
+
+	@Override
+	public Collection<ConfigAttribute> getAttributes(Method method, Class<?> targetClass) {
+
+		RoleAuth annotation = findAnnotation(method, targetClass, RoleAuth.class);
+		if (annotation == null) {
+			// There is no meta-data so return
+			logger.trace("No expression annotations found");
+			return Collections.emptyList();
+		}
+
+		RoleCodeEnum[] attributeTokens = annotation.value();
+		List<ConfigAttribute> attributes = new ArrayList<>(attributeTokens.length);
+
+		for (RoleCodeEnum roleCodeEnum : attributeTokens) {
+			attributes.add(new SecurityConfig("ROLE_" + roleCodeEnum.toCode()));
+		}
+
+		return attributes;
+	}
+
+	@Override
+	public Collection<ConfigAttribute> getAllConfigAttributes() {
+		return null;
+	}
+
+	/**
+	 * See
+	 * {@link org.springframework.security.access.method.AbstractFallbackMethodSecurityMetadataSource#getAttributes(Method, Class)}
+	 * for the logic of this method. The ordering here is slightly different in that we
+	 * consider method-specific annotations on an interface before class-level ones.
+	 */
+	private <A extends Annotation> A findAnnotation(Method method, Class<?> targetClass,
+			Class<A> annotationClass) {
+		// The method may be on an interface, but we need attributes from the target
+		// class.
+		// If the target class is null, the method will be unchanged.
+		Method specificMethod = ClassUtils.getMostSpecificMethod(method, targetClass);
+		A annotation = AnnotationUtils.findAnnotation(specificMethod, annotationClass);
+
+		if (annotation != null) {
+			logger.debug(annotation + " found on specific method: " + specificMethod);
+			return annotation;
+		}
+
+		// Check the original (e.g. interface) method
+		if (specificMethod != method) {
+			annotation = AnnotationUtils.findAnnotation(method, annotationClass);
+
+			if (annotation != null) {
+				logger.debug(annotation + " found on: " + method);
+				return annotation;
+			}
+		}
+
+		// Check the class-level (note declaringClass, not targetClass, which may not
+		// actually implement the method)
+		annotation = AnnotationUtils.findAnnotation(specificMethod.getDeclaringClass(),
+				annotationClass);
+
+		if (annotation != null) {
+			logger.debug(annotation + " found on: "
+					+ specificMethod.getDeclaringClass().getName());
+			return annotation;
+		}
+
+		return null;
+	}
+
+}
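This metadata source still has to be registered with Spring Security's method-security machinery; a generic sketch using the standard GlobalMethodSecurityConfiguration hook (the project's own configuration may differ):

    import org.springframework.context.annotation.Configuration;
    import org.springframework.security.access.method.MethodSecurityMetadataSource;
    import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;
    import org.springframework.security.config.annotation.method.configuration.GlobalMethodSecurityConfiguration;

    @Configuration
    @EnableGlobalMethodSecurity
    public class ExampleMethodSecurityConfig extends GlobalMethodSecurityConfiguration {

        // plug @RoleAuth in as the source of ConfigAttributes for method invocations
        @Override
        protected MethodSecurityMetadataSource customMethodSecurityMetadataSource() {
            return new RoleAuthAnnotationSecurityMetadataSource();
        }
    }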

+ 83 - 0
src/main/java/org/ccframe/commons/auth/TokenUser.java

@@ -0,0 +1,83 @@
+package org.ccframe.commons.auth;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.ccframe.config.GlobalEx;
+import org.ccframe.subsys.core.domain.code.BoolCodeEnum;
+import org.ccframe.subsys.core.domain.entity.User;
+
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.Setter;
+import lombok.ToString;
+
+/**
+ * Token user.
+ * If operation-level restrictions are added later, all button-level operation permissions should be returned here as well.
+ * @author JIM
+ *
+ */
+@Getter
+@Setter
+@NoArgsConstructor
+@AllArgsConstructor
+@ToString
+public class TokenUser implements Serializable, HasRoleSet{
+
+	private static final long serialVersionUID = -1077181653115888934L;
+
+	public static final String USER_ID = "userId";
+
+	public static final String LOGIN_ID = "loginId";
+
+	public static final String EXPIRE_TIME = "expireTime";
+
+	private Integer userId;
+	
+	private String loginId;
+
+	private Integer platformId;
+
+	private Set<Integer> roleIds;
+
+	private Long expireTime; // timestamp used to decide when an expiring token should be refreshed
+	
+	private boolean ifAdmin;
+	
+	/**
+	 * Created after a successful login.
+	 * @param user
+	 * @return
+	 */
+	public static TokenUser create(User user){
+		TokenUser adminUser =  new TokenUser();
+		adminUser.setLoginId(user.getLoginId());
+		adminUser.setUserId(user.getUserId());
+		adminUser.setPlatformId(user.getPlatformId());
+		adminUser.setRoleIds( Arrays.stream(user.getRoleCodeStr().split(GlobalEx.DEFAULT_TEXT_SPLIT_CHAR)).map(Integer::valueOf).collect(Collectors.toSet()));
+		adminUser.setIfAdmin(BoolCodeEnum.toBoolValue(user.getIfAdmin()));
+		return adminUser;
+	}
+
+	/**
+	 * Created from a front-end request; used to refresh the token.
+	 * @param dataMap
+	 * @param platformId
+	 * @param ifAdmin
+	 * @return
+	 */
+	public static TokenUser create(Map<String, Object> dataMap, Integer platformId, boolean ifAdmin){
+		TokenUser clientUser =  new TokenUser();
+		clientUser.setLoginId((String)dataMap.get(LOGIN_ID));
+		clientUser.setUserId((Integer)dataMap.get(USER_ID));
+		clientUser.setExpireTime((Long)dataMap.get(EXPIRE_TIME));
+		clientUser.setPlatformId(platformId);
+		clientUser.setIfAdmin(ifAdmin);
+		return clientUser;
+	}
+}
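A sketch of the login-side flow. JwtUtil.encodeData is an assumed counterpart to the decodeData call seen in JwtHeadFilter and may not match the real method name:

    // after the user's credentials have been verified:
    TokenUser tokenUser = TokenUser.create(user);      // roleIds parsed from user.getRoleCodeStr()
    String token = JwtUtil.encodeData(tokenUser);      // hypothetical encoder
    response.setHeader(GlobalEx.ADMIN_TOKEN, token);   // admin clients echo this header back on later requests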

+ 50 - 0
src/main/java/org/ccframe/commons/base/AggregationField.java

@@ -0,0 +1,50 @@
+package org.ccframe.commons.base;
+
+/**
+ * Simple aggregation parameters for Elasticsearch statistics queries.
+ * @author JIM
+ *
+ */
+public class AggregationField {
+
+	public enum AggregationType{
+		MAX, MIN, AVG, SUM, COUNT, COUNT_DISTINCT
+	}
+	
+	/**
+	 * Name of the entity field to aggregate
+	 */
+	private String fieldName;
+	
+	/**
+	 * Key under which the result appears in the output map
+	 */
+	private String aggregationFieldName;
+	
+	private AggregationType aggregationType;
+	
+	public AggregationField(String aggregationFieldName, String fieldName, AggregationType aggregationType){
+		this.aggregationFieldName = aggregationFieldName;
+		this.fieldName = fieldName;
+		this.aggregationType = aggregationType;
+	}
+	
+	public String getFieldName() {
+		return fieldName;
+	}
+	public void setFieldName(String fieldName) {
+		this.fieldName = fieldName;
+	}
+	public String getAggregationFieldName() {
+		return aggregationFieldName;
+	}
+	public void setAggregationFieldName(String aggregationFieldName) {
+		this.aggregationFieldName = aggregationFieldName;
+	}
+	public AggregationType getAggregationType() {
+		return aggregationType;
+	}
+	public void setAggregationType(AggregationType aggregationType) {
+		this.aggregationType = aggregationType;
+	}
+}
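For instance, a SUM keyed as "amountSum" in the result map; the field name below is illustrative:

    AggregationField sumField = new AggregationField(
            "amountSum",                      // key in the output map
            "chargeAmount",                   // entity field to aggregate (hypothetical name)
            AggregationField.AggregationType.SUM);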

+ 96 - 0
src/main/java/org/ccframe/commons/base/BaseEntity.java

@@ -0,0 +1,96 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+import java.util.Date;
+
+import javax.persistence.Column;
+import javax.persistence.EntityListeners;
+import javax.persistence.MappedSuperclass;
+import javax.persistence.Temporal;
+import javax.persistence.TemporalType;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.ccframe.commons.helper.EntityOperationListener;
+import org.ccframe.config.GlobalEx;
+import org.springframework.data.elasticsearch.annotations.DateFormat;
+import org.springframework.data.elasticsearch.annotations.Field;
+import org.springframework.data.elasticsearch.annotations.FieldType;
+
+import com.fasterxml.jackson.annotation.JsonFormat;
+
+import lombok.Getter;
+import lombok.Setter;
+
+
+/**
+ * Extend this base class when an entity needs index sorting, operation auditing, or optimistic locking.
+ * Child tables the business never searches should not extend it, nor should long-running flows (higher odds of optimistic-lock failure).
+ * save() fills the audit columns automatically on insert and update; take care with pre-seeded data.
+ * 
+ * Enable optimistic locking per table where needed, since it adds a database verification query.
+ * 
+ * @author JIM
+ *
+ */
+@MappedSuperclass
+@Setter
+@Getter
+@EntityListeners({EntityOperationListener.class})
+public abstract class BaseEntity implements Serializable{
+
+	private static final long serialVersionUID = -5656014821398158846L;
+
+	public static final String CREATE_TIME = "createTime";
+	
+	public static final String UPDATE_TIME = "updateTime";
+	
+	public static final String CREATE_USER_ID = "createUserId"; // creator
+	
+	public static final String UPDATE_USER_ID = "updateUserId"; // last modifier
+
+    @Temporal(TemporalType.TIMESTAMP)
+    @Column(name = "CREATE_TIME", nullable = false, length = 0, updatable = false)
+	//elasticsearch
+    @Field(type = FieldType.Date, format = DateFormat.custom, pattern = GlobalEx.ES_DATE_PATTERN)
+    @JsonFormat (shape = JsonFormat.Shape.STRING, pattern = GlobalEx.STANDERD_DATE_FORMAT, timezone = GlobalEx.TIMEZONE)
+    private Date createTime;
+	
+    @Temporal(TemporalType.TIMESTAMP)
+    @Column(name = "UPDATE_TIME", nullable = false, length = 0)
+	//elasticsearch
+    @Field(type = FieldType.Date, format = DateFormat.custom, pattern = GlobalEx.ES_DATE_PATTERN)
+    @JsonFormat (shape = JsonFormat.Shape.STRING, pattern = GlobalEx.STANDERD_DATE_FORMAT, timezone = GlobalEx.TIMEZONE)
+	private Date updateTime;
+	
+    @Column(name = "CREATE_USER_ID", nullable = true, length = 10, updatable = false)
+    @Field(type = FieldType.Integer)
+	private Integer createUserId;
+	
+    @Column(name = "UPDATE_USER_ID", nullable = true, length = 10)
+    @Field(type = FieldType.Integer)
+	private Integer updateUserId;
+
+	public abstract Integer getId();
+
+	@Override
+	public boolean equals(Object obj) {
+		if(!(getClass().isInstance(obj))){
+			return false;
+		}
+		if(this == obj){
+			return true;
+		}
+		BaseEntity other = (BaseEntity)obj;
+		return new EqualsBuilder()
+		.append(getId(),other.getId())
+		.isEquals();
+	}
+	
+	@Override
+	public int hashCode() {
+		return new HashCodeBuilder()
+		.append(getId())
+		.toHashCode();
+	}
+
+}
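A hypothetical subclass showing the contract: declare the @Id field and implement getId(); the audit columns above are inherited and maintained by EntityOperationListener:

    import javax.persistence.Column;
    import javax.persistence.Entity;
    import javax.persistence.Id;
    import javax.persistence.Table;

    @Entity
    @Table(name = "SYS_TAG")        // illustrative entity, not part of the commit
    public class Tag extends BaseEntity {

        private static final long serialVersionUID = 1L;

        @Id
        @Column(name = "TAG_ID")
        private Integer tagId;

        @Column(name = "TAG_NAME", length = 64)
        private String tagName;

        @Override
        public Integer getId() {    // used by equals/hashCode in BaseEntity
            return tagId;
        }
    }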

+ 13 - 0
src/main/java/org/ccframe/commons/base/BaseRepository.java

@@ -0,0 +1,13 @@
+package org.ccframe.commons.base;
+
+import org.springframework.data.jpa.repository.JpaRepository;
+import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
+import org.springframework.data.repository.NoRepositoryBean;
+
+/**
+ * @author Jim Wu
+ */
+@NoRepositoryBean
+public interface BaseRepository<E> extends JpaRepository<E, Integer>, JpaSpecificationExecutor<E>{
+}
+

+ 10 - 0
src/main/java/org/ccframe/commons/base/BaseSearchRepository.java

@@ -0,0 +1,10 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+
+import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;
+import org.springframework.data.repository.PagingAndSortingRepository;
+
+public interface BaseSearchRepository<E, ID extends Serializable> extends ElasticsearchRepository<E, ID>, PagingAndSortingRepository<E, ID>{
+
+}

+ 569 - 0
src/main/java/org/ccframe/commons/base/BaseSearchService.java

@@ -0,0 +1,569 @@
+package org.ccframe.commons.base;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.persistence.Id;
+
+import org.apache.commons.lang3.RegExUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.util.UtilDateTime;
+import org.ccframe.config.Global;
+import org.ccframe.config.GlobalEx;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.redisson.api.RAtomicLong;
+import org.redisson.api.RedissonClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.data.domain.PageRequest;
+import org.springframework.data.domain.Sort;
+import org.springframework.data.domain.Sort.Order;
+import org.springframework.data.elasticsearch.core.ElasticsearchOperations;
+import org.springframework.data.elasticsearch.core.ResultsExtractor;
+import org.springframework.data.elasticsearch.core.query.Criteria;
+import org.springframework.data.elasticsearch.core.query.CriteriaQuery;
+import org.springframework.data.elasticsearch.core.query.NativeSearchQuery;
+import org.springframework.data.elasticsearch.core.query.NativeSearchQueryBuilder;
+import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;
+import org.springframework.scheduling.annotation.Async;
+import org.springframework.transaction.annotation.Transactional;
+
+import com.google.common.collect.Lists;
+
+import net.oschina.j2cache.CacheChannel;
+
+/**
+ * Generic search layer. While an index is being rebuilt, findByKey and friends automatically degrade to database queries, so data stays reachable during reindexing.
+ * Known limitation: some extended index-only fields will fail when degraded to a database search; their handling at index-build time still needs work.
+ * 
+ * @author JIM
+ *
+ * @param <E>
+ * @param <R>
+ */
+public abstract class BaseSearchService<E extends BaseEntity, R extends ElasticsearchRepository<E, Integer>>
+		implements IHasSearchBuilder<E> { // generic injection is supported since Spring 4.x
+
+	private static final int INDEX_LOG_STEP = 1000;
+
+	private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
+
+	private R repository;
+
+	private Class<E> entityClass;
+	private String idFieldName;
+	private Class<?> idFieldType;
+	private BaseService<E, ?> jpaService;
+
+	protected static final int PAGE_REQUEST_MAX = 10000; // Elasticsearch max_result_window ceiling
+
+	protected static final String Aggregation_FIELD = "aggregation_";
+
+	@Autowired
+	protected CacheChannel cacheChannel;
+
+	@Autowired
+	private ElasticsearchOperations elasticsearchTemplate;
+
+	@Autowired
+	protected RedissonClient redissonClient;
+
+	protected int getIndexStep() { // if a table is so large that one batch exceeds the ES limit, lower this value
+		return INDEX_LOG_STEP;
+	}
+	
+	@SuppressWarnings("unchecked") // NOSONAR
+	public BaseSearchService() {
+		Class<?> typeCls = getClass();
+		Type genType = typeCls.getGenericSuperclass();
+		while (true) {
+			if (!(genType instanceof ParameterizedType)) {
+				typeCls = typeCls.getSuperclass();
+				genType = typeCls.getGenericSuperclass();
+			} else {
+				break;
+			}
+		}
+		this.entityClass = (Class<E>) ((ParameterizedType) genType).getActualTypeArguments()[0];
+
+		Field[] fields = entityClass.getDeclaredFields();
+		for (Field field : fields) {
+			if (field.isAnnotationPresent(Id.class)) {
+				idFieldName = field.getName();
+				idFieldType = field.getType();
+			}
+		}
+	}
+
+	public Class<E> getEntityClass(){
+		return entityClass;
+	}
+
+	protected Logger getLogger() {
+		return logger;
+	}
+
+	@Autowired
+	public void setRepository(R repository) {
+		this.repository = repository;
+	}
+
+	protected R getRepository() {
+		return repository;
+	}
+
+	public E getById(Integer id) {
+		return getRepository().findById(id).orElse(null);
+	}
+
+	public Iterable<E> listAll() {
+		return getRepository().findAll();
+	}
+
+	public void saveBatch(List<E> dataList) {
+		for(E data: dataList) {
+			buildExtend(data);
+		}
+		this.getRepository().saveAll(dataList);
+	}
+
+	public void deleteBatch(List<E> dataList) {
+		this.getRepository().deleteAll(dataList);
+	}
+
+	public void save(E data) {
+		buildExtend(data);
+		getRepository().save(data);
+	}
+
+	public void delete(E data) {
+		getRepository().delete(data);
+	}
+
+	public void deleteById(Integer id) {
+		getRepository().deleteById(id);
+	}
+
+	protected static final Pattern DISK_CHECK_PATTERN = Pattern.compile("ClusterBlockException.+read-only.+allow delete");
+
+	@Value("${app.search.embedded}")
+	private boolean embedded;
+	
+	@Async
+	@Override
+	@Transactional(readOnly = true)
+	public void buildAllIndex(boolean forceDelete) {
+		cacheChannel.set(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG, 1);
+		try {
+			if(forceDelete) {
+				if(embedded) {
+					for(E e: this.getRepository().findAll()) { // embedded mode can only delete one record at a time, which is slow and bounded by max_result_window (currently 10M)
+						this.getRepository().delete(e);
+					}
+				}else {
+					this.getRepository().deleteAll(); // bulk wipe; not usable in embedded mode
+				}
+			}
+
+			List<Integer> idList = getJpaService().findIdList();
+			if(!idList.isEmpty()) {
+				syncRedisIdMax(Collections.max(idList));
+			}
+			List<List<Integer>> workIdGroupList = Lists.partition(idList, getIndexStep());
+			int i = 0;
+			for(List<Integer> workIdList: workIdGroupList) {
+				try {
+					List<E> entityList = getJpaService().getByIdBatch(workIdList, true);
+					for(E e: entityList) {
+						buildExtend(e);
+					}
+					this.getRepository().saveAll(entityList); // 批量建立索引
+					i += workIdList.size();
+					logger.info("Indexing progress: {} {}/{}",entityClass.getSimpleName(),i,idList.size());
+					cacheChannel.set(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + Global.INDEXING_STATUS, 
+						UtilDateTime.convertDateTimeToString(new Date()) + GlobalEx.DEFAULT_TEXT_SPLIT_CHAR + i + GlobalEx.DEFAULT_TEXT_SPLIT_CHAR + idList.size()); // the leading timestamp marks build time (hide it once more than an hour old); used to monitor bulk index builds
+				}catch (org.springframework.data.elasticsearch.ElasticsearchException ex) { // TODO
+					// use UncategorizedDataAccessException to simulate the disk-full case
+					if (!ex.getFailedDocuments().isEmpty()) {
+						for (String faildedText : ex.getFailedDocuments().values()) {
+							Matcher matcher = DISK_CHECK_PATTERN.matcher(faildedText);
+							if (matcher.find()) {
+								throw new RuntimeException("Less than 5% disk space remaining; free up space and restart the application");
+							}
+						}
+					}
+					throw ex;
+				}
+			}
+		}
+		finally {
+			cacheChannel.set(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG, 0);
+		}
+	}
+
+	private void syncRedisIdMax(Integer max) {
+		if(max == null) {
+			return;
+		}
+		RAtomicLong atomicLong = redissonClient.getAtomicLong(Global.REDIS_PERFIX + idFieldName);
+		long next = atomicLong.get(); // RAtomicLong.get() returns a primitive, so a null check is meaningless
+		if(next <= max) { // correct the initial value
+			atomicLong.set(max + 1);
+		}
+	}
+
+	protected void buildExtend(E e) { // builds derived data; override when the entity has ES-only extension fields
+		// populate additional ES-only fields here
+	}
+
+	private void checkType(Object object) {
+		if (object == null) {
+			logger.error("search object type error:null");
+		}
+		if (!(object instanceof Number || object instanceof Boolean || object instanceof String
+				|| object instanceof Collection || object instanceof SearchRange)) {
+			logger.warn("search object type error:" + object.getClass().getSimpleName());
+		}
+	}
+
+	public List<E> search(CriteriaQuery query){
+		return elasticsearchTemplate.queryForList(query, entityClass);
+	}
+	
+	protected ElasticsearchOperations getElasticsearchTemplate() {
+		return elasticsearchTemplate;
+	}
+
+	/**
+	 * Sorts descending by orderField (typically a time field).
+	 * 
+	 * @param fieldName
+	 * @param values
+	 * @param orderField
+	 * @return
+	 */
+	public List<E> findByKeyDesc(String fieldName, Collection<?> values, String orderField) {
+		return findByKey(fieldName, values, Order.desc(orderField));
+	}
+
+	/**
+	 * Sorts ascending by orderField (typically a position field).
+	 * 
+	 * @param fieldName
+	 * @param values
+	 * @param orderField
+	 * @return
+	 */
+	public List<E> findByKeyAsc(String fieldName, Collection<?> values, String orderField) {
+		return findByKey(fieldName, values, Order.asc(orderField));
+	}
+
+	public List<E> findByKey(String fieldName, Collection<?> values, Order... orders) {
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			CriteriaQuery criteriaQuery = new CriteriaQuery(Criteria.where(fieldName).in(values)); // for ngram fields, use a match query combined with AND to hit every condition
+			if (orders.length > 0) {
+				criteriaQuery.addSort(Sort.by(orders));
+			}
+			criteriaQuery.setPageable(PageRequest.of(0, PAGE_REQUEST_MAX, Sort.by(orders)));
+			return elasticsearchTemplate.queryForList(criteriaQuery, entityClass);
+		} else { // fall back to a database query
+			return getJpaService().findByKey(fieldName, values, orders);
+		}
+	}
+
+	/**
+	 * Sorts descending by orderField (typically a time field).
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @param orderField
+	 * @return
+	 */
+	public List<E> findByKeyDesc(String fieldName, Object value, String orderField) {
+		return findByKey(fieldName, value, Order.desc(orderField));
+	}
+
+	/**
+	 * Sorts ascending by orderField (typically a position field).
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @param orderField
+	 * @return
+	 */
+	public List<E> findByKeyAsc(String fieldName, Object value, String orderField) {
+		return findByKey(fieldName, value, Order.asc(orderField));
+	}
+
+	public List<E> findByKey(String fieldName, Object value, Order... orders) {
+		checkType(value);
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			CriteriaQuery criteriaQuery = new CriteriaQuery(Criteria.where(fieldName).is(value)); // for ngram fields, use a match query combined with AND to hit every condition
+			if (orders.length > 0) {
+				criteriaQuery.addSort(Sort.by(orders));
+			}
+			criteriaQuery.setPageable(PageRequest.of(0, PAGE_REQUEST_MAX, Sort.by(orders)));
+			return elasticsearchTemplate.queryForList(criteriaQuery, entityClass);
+		} else { // fall back to a database query
+			return getJpaService().findByKey(fieldName, value, orders);
+		}
+	}
+
+	public E getByKey(String fieldName, Collection<?> values) {
+		for (Object value : values) {
+			checkType(value);
+		}
+		try {
+			int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+			if (indexBuilding == 0) {
+				return elasticsearchTemplate.queryForObject(new CriteriaQuery(Criteria.where(fieldName).in(values)), entityClass);
+			} else { // fall back to a database query
+				return getJpaService().getByKey(fieldName, values);
+			}
+		} catch (NoSuchElementException e) {
+			return null;
+		}
+	}
+
+	public E getByKey(String fieldName, Object value) {
+		checkType(value);
+		try {
+			int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+			if (indexBuilding == 0) {
+				return elasticsearchTemplate.queryForObject(new CriteriaQuery(Criteria.where(fieldName).is(value)), entityClass);
+			} else { // fall back to a database query
+				return getJpaService().getByKey(fieldName, value);
+			}
+		} catch (NoSuchElementException e) {
+			return null;
+		}
+	}
+
+	/**
+	 * Multi-key query; when a value expresses a numeric or date range, pass it as a SearchRange.
+	 * 
+	 * @param fieldNames
+	 * @param values
+	 * @param orders
+	 * @return
+	 */
+	public List<E> findByMultiKey(String[] fieldNames, Object[] values, Order... orders) {
+		for (Object value : values) {
+			checkType(value);
+		}
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			Criteria criteria = new Criteria();
+			for (int i = 0; i < fieldNames.length; i++) {
+				Object value = values[i];
+				if (value instanceof Iterable) {
+					criteria.and(new Criteria(fieldNames[i]).in((Iterable)values[i]));
+				} else if (value instanceof SearchRange) {
+					SearchRange searchRange = (SearchRange) value;
+					if(searchRange.getStart() != null && searchRange.getEnd() != null) {
+						criteria.and(new Criteria(fieldNames[i]).between(searchRange.getStart(),searchRange.getEnd()));
+					}else if(searchRange.getStart() != null) {
+						criteria.and(new Criteria(fieldNames[i]).greaterThanEqual(searchRange.getStart()));
+					}else if(searchRange.getEnd() != null) {
+						criteria.and(new Criteria(fieldNames[i]).lessThanEqual(searchRange.getEnd()));
+					}
+				} else {
+					criteria.and(new Criteria(fieldNames[i]).is(values[i]));
+				}
+			}
+			CriteriaQuery criteriaQuery = new CriteriaQuery(criteria);
+			criteriaQuery.setPageable(PageRequest.of(0, PAGE_REQUEST_MAX, Sort.by(orders)));
+			return elasticsearchTemplate.queryForList(criteriaQuery, entityClass);
+		} else { // fall back to a database query
+			return getJpaService().findByMultiKey(fieldNames, values, orders);
+		}
+	}
+
+	@Transactional(readOnly = true)
+	public E getByMultiKey(String[] fieldNames, Object... values) {
+		for (Object value : values) {
+			checkType(value);
+		}
+		try {
+			int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+			if (indexBuilding == 0) {
+				Criteria criteria = new Criteria();
+				for (int i = 0; i < fieldNames.length; i++) {
+					Object value = values[i];
+					if (value instanceof Iterable) {
+						criteria.and(new Criteria(fieldNames[i]).in((Iterable)values[i]));
+					} else if (value instanceof SearchRange) {
+						SearchRange searchRange = (SearchRange) value;
+						if(searchRange.getStart() != null && searchRange.getEnd() != null) {
+							criteria.and(new Criteria(fieldNames[i]).between(searchRange.getStart(),searchRange.getEnd()));
+						}else if(searchRange.getStart() != null) {
+							criteria.and(new Criteria(fieldNames[i]).greaterThanEqual(searchRange.getStart()));
+						}else if(searchRange.getEnd() != null) {
+							criteria.and(new Criteria(fieldNames[i]).lessThanEqual(searchRange.getEnd()));
+						}
+					} else {
+						criteria.and(new Criteria(fieldNames[i]).is(values[i]));
+					}
+				}
+				return elasticsearchTemplate.queryForObject(new CriteriaQuery(criteria), entityClass);
+			} else { // fall back to a database query
+				return getJpaService().getByMultiKey(fieldNames, values);
+			}
+		} catch (NoSuchElementException e) {
+			return null;
+		}
+	}
+
+	public int countQuery(QueryBuilder queryBuilder,String fieldName) {
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			NativeSearchQuery searchQuery = new NativeSearchQueryBuilder().withQuery(queryBuilder).withSearchType(SearchType.DEFAULT).build();
+			return (int) elasticsearchTemplate.count(searchQuery, this.entityClass);
+		}else {
+			return 0; // the index is not fully built yet, so nothing can be found
+		}
+	}
+
+	public int countByKey(String fieldName, Object value){
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			TermQueryBuilder queryBuilder = QueryBuilders.termQuery(fieldName, value);
+			return this.countQuery(queryBuilder, fieldName);
+		}else { // fall back to a database query
+			return getJpaService().countByKey(fieldName, value);
+		}
+	}
+
+	public int countByKey(String fieldName, Collection<?> values){
+		int indexBuilding = cacheChannel.get(GlobalEx.CACHEREGION_FOREVER, entityClass.getSimpleName() + GlobalEx.INDEXING_FLAG).asInt(0);
+		if (indexBuilding == 0) {
+			TermsQueryBuilder queryBuilder = QueryBuilders.termsQuery(fieldName, values);
+			return this.countQuery(queryBuilder, fieldName);
+		}else { // fall back to a database query
+			return getJpaService().countByKey(fieldName, values);
+		}
+	}
+
+	/**
+	 * Runs aggregation requests. Note that count, and sum over integer fields, also come back as double values, so mind the type conversion on return.
+     *
+	 * For example, to total ChargeOrder's CHARGE_AMMOUNT field under a boolQueryBuilder condition:
+	 * SpringContextHelper.getBean(ChargeOrderSearchService.class).aggregationQuery(boolQueryBuilder, new AggregationField("chargeSum", ChargeOrder.CHARGE_AMMOUNT, AggregationType.SUM));
+     *
+	 * @param queryBuilder
+	 * @param aggregationFields
+	 * @return
+	 */
+	public Map<String, Double> aggregationQuery(QueryBuilder queryBuilder,final AggregationField... aggregationFields){
+		
+		NativeSearchQueryBuilder builder = new NativeSearchQueryBuilder()
+			.withQuery(queryBuilder)
+			.withSearchType(SearchType.DEFAULT)
+			.withIndices(entityClass.getSimpleName().toLowerCase()); // query the entity's index; index names must be lowercase
+
+		for(AggregationField aggregationField: aggregationFields){
+			switch(aggregationField.getAggregationType()){
+				case MAX:
+					builder.addAggregation(AggregationBuilders.max(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				case MIN:
+					builder.addAggregation(AggregationBuilders.min(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				case AVG:
+					builder.addAggregation(AggregationBuilders.avg(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				case SUM:
+					builder.addAggregation(AggregationBuilders.sum(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				case COUNT:
+					builder.addAggregation(AggregationBuilders.count(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				case COUNT_DISTINCT:
+					builder.addAggregation(AggregationBuilders.cardinality(aggregationField.getAggregationFieldName()).field(aggregationField.getFieldName()));
+					break;
+				default:
+			}
+		}
+		return elasticsearchTemplate.query(builder.build(), new ResultsExtractor<Map<String, Double>>() {
+			@Override
+			public Map<String, Double> extract(SearchResponse response) {
+				Map<String, Aggregation> aggregationMap = response.getAggregations().asMap();
+				
+				Map<String, Double> resultMap = new HashMap<String, Double>();
+				for (AggregationField aggregationField: aggregationFields) {
+					Aggregation value = aggregationMap.get(aggregationField.getAggregationFieldName());
+					if(value instanceof NumericMetricsAggregation.SingleValue) {
+						resultMap.put(aggregationField.getAggregationFieldName(), ((NumericMetricsAggregation.SingleValue)value).value());
+					}
+				}
+				return resultMap;
+			}
+		});		
+	}
+
+	/**
+	 * If the async pool is small and concurrency needs tuning, raise the priority of the services for the largest tables to improve thread utilization.
+	 * 
+	 * @see org.ccframe.commons.base.IHasSearchBuilder#getPriority()
+	 */
+	@Override
+	public Integer getPriority() {
+		return 100;
+	}
+
+	public BaseService<E, ?> getJpaService() {
+		String beanName = StringUtils.uncapitalize(RegExUtils.replaceFirst(getClass().getSimpleName(),
+				GlobalEx.SEARCH_SERVICE_CLASS_SUFFIX + "$", GlobalEx.SERVICE_CLASS_SUFFIX));
+		if(jpaService == null) {
+			jpaService = (BaseService<E, ?>) SpringContextHelper.getBean(beanName);
+		}
+		return jpaService;
+	}
+
+/*	@SuppressWarnings("unchecked")
+	public Page<E> searchOver10k(QueryBuilder query, Pageable pageable) {
+		IndexCoordinates index = elasticsearchTemplate.getIndexCoordinatesFor(entityClass);
+
+		NativeSearchQuery searchQuery = new NativeSearchQueryBuilder().withQuery(query).withPageable(pageable).build();
+		searchQuery.setTrackTotalHits(true);
+		SearchHits<E> searchHits = elasticsearchTemplate.search(searchQuery, entityClass, index);
+		AggregatedPage<SearchHit<E>> page = SearchHitSupport.page(searchHits, searchQuery.getPageable());
+		return (Page<E>) SearchHitSupport.unwrapSearchHits(page);
+	}
+*/
+	public List<E> searchFuzzy(String fieldName, Object value, Order... orders) { // search against fuzzy-match (ngram) fields
+		NativeSearchQuery searchQuery = new NativeSearchQueryBuilder().withQuery(QueryBuilders.matchPhraseQuery(fieldName, value)).withPageable(PageRequest.of(0, PAGE_REQUEST_MAX, Sort.by(orders))).build();
+		return elasticsearchTemplate.queryForList(searchQuery, entityClass);
+	}
+
+}
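Putting the aggregation API together, following the aggregationQuery javadoc's ChargeOrder example (the field constants and the service bean are assumed from that javadoc, not shown in this hunk):

    BoolQueryBuilder query = QueryBuilders.boolQuery()
            .must(QueryBuilders.termQuery("platformId", 1));

    Map<String, Double> stats = SpringContextHelper.getBean(ChargeOrderSearchService.class)
            .aggregationQuery(query,
                    new AggregationField("amountSum", ChargeOrder.CHARGE_AMMOUNT, AggregationField.AggregationType.SUM),
                    new AggregationField("orderCount", ChargeOrder.CHARGE_AMMOUNT, AggregationField.AggregationType.COUNT));

    // every metric comes back as a Double, including counts
    int orderCount = stats.get("orderCount").intValue();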

+ 378 - 0
src/main/java/org/ccframe/commons/base/BaseService.java

@@ -0,0 +1,378 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import javax.persistence.EntityManager;
+import javax.persistence.Id;
+
+import org.apache.commons.collections.CollectionUtils;
+//import org.ccframe.commons.helper.EhCacheHelper;
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.jpaquery.Criteria;
+import org.ccframe.commons.jpaquery.Restrictions;
+//import org.ccframe.commons.util.ElasticsearchTransactionUtil;
+import org.ccframe.config.GlobalEx;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.BeanUtils;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.data.domain.Sort;
+import org.springframework.data.domain.Sort.Order;
+import org.springframework.transaction.annotation.Transactional;
+
+import net.oschina.j2cache.CacheChannel;
+import net.oschina.j2cache.CacheObject;
+
+public abstract class BaseService <E extends BaseEntity,R extends BaseRepository<E>>{ // generic injection is supported since Spring 4.x
+	
+    private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
+
+    private Class<E> entityClass;
+    private String idFieldName;
+    private Class<?> idFieldType;
+    
+    @Autowired
+	private EntityManager entityManager;
+	
+	@Autowired
+    private CacheChannel cacheChannel;
+
+    @SuppressWarnings("unchecked") //NOSONAR
+    public BaseService() {
+        Class<?> typeCls = getClass();
+        Type genType = typeCls.getGenericSuperclass();
+        while (true) {
+            if (!(genType instanceof ParameterizedType)) {
+                typeCls = typeCls.getSuperclass();
+                genType = typeCls.getGenericSuperclass();
+            } else {
+                break;
+            }
+        }
+        this.entityClass = (Class<E>) ((ParameterizedType) genType).getActualTypeArguments()[0];
+        
+    	Field[] fields = entityClass.getDeclaredFields();
+    	for(Field field: fields){
+    		if(field.isAnnotationPresent(Id.class)){
+    			idFieldName = field.getName();
+    			idFieldType = field.getType();
+    		}
+    	}
+    }
+  
+    public Class<E> getEntityClass(){
+    	return entityClass;
+    }
+
+	protected Logger getLogger() {
+		return logger;
+	}
+
+	public String getIdFieldName() {
+		return idFieldName;
+	}
+
+	@SuppressWarnings("unchecked")
+	@Transactional(readOnly = true)
+	public List<Integer> findIdList(){
+		return entityManager.createQuery("select o." + idFieldName + " from " + entityClass.getSimpleName() + " o").getResultList();
+	}
+
+	@SuppressWarnings("unchecked")
+	@Transactional(readOnly = true)
+	public Integer getIdByKey(String fieldName, Object value){
+		try {
+			return (Integer)entityManager.createQuery("select o." + idFieldName + " from " + entityClass.getSimpleName() + " o where " + fieldName + "=?1").setParameter(1, value).getSingleResult();
+		}catch(Exception e) {
+			return null;
+		}
+	}
+
+	private R repository;
+	
+	@Autowired
+	public void setRepository(R repository) {
+		this.repository = repository;
+	}
+
+	protected R getRepository(){
+		return repository;
+	}
+
+	@Transactional(readOnly = true)
+	public E getById(Integer id) {
+		if(entityClass.getAnnotation(DisableCache.class) != null) { // caching disabled for this entity
+			return getById(id, true);
+		}else {
+			return getById(id, false);
+		}
+	}
+	
+	@Transactional(readOnly = true)
+	public E getById(Integer id, boolean withoutCache) { // FIXME: an empty object has been observed coming back from the cache here
+		
+		if(withoutCache) { // bypass the cache
+			return getRepository().findById(id).orElse(null);
+		}
+		
+		@SuppressWarnings("unchecked")
+		E e = (E)cacheChannel.get(GlobalEx.CACHEREGION_ENTITY, entityClass.getSimpleName() + "-" + id.toString()).getValue();
+		if(e == null) {
+			E db = getRepository().findById(id).orElse(null);
+			if(db != null) {
+				buildCache(db);
+			}
+			return db;
+		}else {
+			return e;
+		}
+	}
+
+	@Transactional(readOnly = true)
+	public List<E> getByIdBatch(Collection<Integer> idList) {
+		if(entityClass.getAnnotation(DisableCache.class) != null) { // caching disabled for this entity
+			return getByIdBatch(idList, true);
+		}else {
+			return getByIdBatch(idList, false);
+		}
+	}
+
+	@Transactional(readOnly = true)
+	public List<E> getByIdBatch(Collection<Integer> idList, boolean withoutCache) {
+		Map<Integer, E> cachedEntity = new HashMap<Integer, E>();
+		if(!withoutCache) {
+			for(CacheObject cacheObject: cacheChannel.get(GlobalEx.CACHEREGION_ENTITY, idList.stream().map(item -> entityClass.getSimpleName() + "-" + item.toString()).collect(Collectors.toList())).values()) {
+				E e = (E)cacheObject.getValue();
+				if(e != null) {
+					cachedEntity.put(e.getId(), e);
+				}
+			}
+		}
+		Map<Integer, E> dbEntity = new HashMap<Integer, E>();
+		if(idList.size() > cachedEntity.size()) {
+			Collection<Integer> dbIdList = CollectionUtils.subtract(idList, cachedEntity.keySet());
+			dbEntity = getRepository().findAllById(dbIdList).stream().collect(Collectors.toMap(BaseEntity::getId, p -> p, (k1, k2) -> k1));
+		}
+		List<E> result = new ArrayList<>();
+		for(Integer id: idList) {
+			E e = cachedEntity.get(id);
+			if(e == null) {
+				e = dbEntity.get(id);
+			}
+			if(e != null) {
+				result.add(e);
+			}
+		}
+		return result;
+	}
+
+	public void buildCache(E e) {
+		cacheChannel.set(GlobalEx.CACHEREGION_ENTITY, entityClass.getSimpleName() + "-" + e.getId().toString(), e);
+	}
+	
+	public E cloneById(Integer id){
+		try {
+			E cloneE = entityClass.newInstance();
+			E entity = getById(id);
+			if(entity == null) {
+				return null;
+			}
+			BeanUtils.copyProperties(entity, cloneE);
+			return cloneE;
+		}catch(InstantiationException|IllegalAccessException e) {
+			throw new RuntimeException(e);
+		}
+	}
+	
+	@Transactional(readOnly = true)
+	public List<E> listAll() {
+		return getRepository().findAll();
+	}
+
+	public List<E> listAll(Sort sort) {
+		return getRepository().findAll(sort);
+	}
+
+	@Transactional
+	public E save(E data){
+		E result = getRepository().save(data);
+		return result;
+	}
+	
+	@Transactional
+	public void save(Iterable<E> dataList){
+		getRepository().saveAll(dataList);
+	}
+	
+	@Transactional
+	public E saveAndFlush(E data){
+        return getRepository().saveAndFlush(data);
+	}
+
+	@Transactional
+	public void delete(E data){
+		getRepository().delete(data); // EntityOperationListener keeps the cache in sync automatically
+	}
+
+	@Transactional
+	public void delete(Iterable<E> dataList){
+		getRepository().deleteAll(dataList); // EntityOperationListener keeps the cache in sync automatically
+	}
+
+	@Transactional
+	public void deleteById(Integer id){
+		getRepository().deleteById(id);
+	}
+
+	@Transactional(readOnly = true)
+	public List<E> findByKey(String fieldName, Collection<?> values, Order... orders){
+		if(orders.length == 0){
+			return getRepository().findAll(new Criteria<E>().add(Restrictions.in(fieldName, values)));
+		}else{
+			return getRepository().findAll(new Criteria<E>().add(Restrictions.in(fieldName, values)), Sort.by(orders));
+		}
+	}
+
+	@Transactional(readOnly = true)
+	public List<E> findByKey(String fieldName, Object value, Order... orders){
+		if(orders.length == 0){
+			return getRepository().findAll(new Criteria<E>().add(Restrictions.eq(fieldName, value)));
+		}else{
+			return getRepository().findAll(new Criteria<E>().add(Restrictions.eq(fieldName, value)), Sort.by(orders));
+		}
+	}
+
+	/**
+	 * Sorts ascending by orderField (typically a position field).
+	 * @param fieldName
+	 * @param value
+	 * @param orderField
+	 * @return
+	 */
+	@Transactional(readOnly = true)
+	public List<E> findByKeyAsc(String fieldName, Object value, String orderField) {
+		return findByKey(fieldName, value, Order.asc(orderField));
+	}
+
+	/**
+	 * Sorts descending by orderField (typically a time field).
+	 * @param fieldName
+	 * @param value
+	 * @param orderField
+	 * @return
+	 */
+	@Transactional(readOnly = true)
+	public List<E> findByKeyDesc(String fieldName, Object value, String orderField) {
+		return findByKey(fieldName, value, Order.desc(orderField));
+	}
+
+	@Transactional(readOnly = true)
+	public E getByKey(String fieldName, Object value){
+		return getRepository().findOne(new Criteria<E>().add(Restrictions.eq(fieldName, value))).orElse(null);
+	}
+
+	/**
+	 * Sorts ascending by orderField (typically a position field).
+	 * @param fieldNames
+	 * @param values
+	 * @param orderField
+	 * @return
+	 */
+	@Transactional(readOnly = true)
+	public List<E> findByMultiKeyAsc(String[] fieldNames, Object[] values, String orderField) {
+		return findByMultiKey(fieldNames, values, Order.asc(orderField));
+	}
+
+	/**
+	 * Sorts descending by orderField (typically a time field).
+	 * @param fieldNames
+	 * @param values
+	 * @param orderField
+	 * @return
+	 */
+	@Transactional(readOnly = true)
+	public List<E> findByMultiKeyDesc(String[] fieldNames, Object[] values, String orderField) {
+		return findByMultiKey(fieldNames, values, Order.desc(orderField));
+	}
+
+	@Transactional(readOnly = true)
+	public List<E> findByMultiKey(String[] fieldNames, Object[] values, Order... orders) {
+		Criteria<E> criteria = new Criteria<E>();
+		for(int i = 0; i < fieldNames.length; i ++) {
+			if(i <= values.length - 1) {
+				Object value = values[i];
+				if(value instanceof Collection) {
+					criteria.add(Restrictions.in(fieldNames[i], (Collection<?>)value));
+				}else if(value instanceof SearchRange) {
+					SearchRange searchRange = (SearchRange)value;
+					criteria.add(Restrictions.gte(fieldNames[i], searchRange.getStart()));
+					criteria.add(Restrictions.lte(fieldNames[i], searchRange.getEnd()));
+				}else {
+					criteria.add(Restrictions.eq(fieldNames[i], value));
+				}
+			}
+		}
+		if(orders.length == 0){
+			return getRepository().findAll(criteria);
+		}else{
+			return getRepository().findAll(criteria, Sort.by(orders));
+		}
+	}
+
+	public E getByMultiKey(String[] fieldNames, Object... values) {
+		Criteria<E> criteria = new Criteria<E>();
+		for(int i = 0; i < fieldNames.length; i ++) {
+			if(i <= values.length - 1) {
+				Object value = values[i];
+				if(value instanceof Collection) {
+					criteria.add(Restrictions.in(fieldNames[i], (Collection<?>)value));
+				}else if(value instanceof SearchRange) {
+					SearchRange searchRange = (SearchRange)value;
+					criteria.add(Restrictions.gte(fieldNames[i], searchRange.getStart()));
+					criteria.add(Restrictions.lte(fieldNames[i], searchRange.getEnd()));
+				}else {
+					criteria.add(Restrictions.eq(fieldNames[i], value));
+				}
+			}
+		}
+		return getRepository().findOne(criteria).orElse(null);
+	}
+
+	public int countByKey(String fieldName, Collection<?> values){
+		return (int) getRepository().count(new Criteria<E>().add(Restrictions.in(fieldName, values)));
+	}
+
+	public int countByKey(String fieldName, Object value){
+		return (int) getRepository().count(new Criteria<E>().add(Restrictions.eq(fieldName, value)));
+	}
+	
+	public int countByMultiKey(String[] fieldNames, Object... values) {
+		Criteria<E> criteria = new Criteria<E>();
+		for(int i = 0; i < fieldNames.length; i ++) {
+			if(i <= values.length - 1) {
+				Object value = values[i];
+				if(value instanceof Collection) {
+					criteria.add(Restrictions.in(fieldNames[i], (Collection<?>)value));
+				}else {
+					criteria.add(Restrictions.eq(fieldNames[i], value));
+				}
+			}
+		}
+		return (int) getRepository().count(criteria);
+	}
+
+	public void evictCache(BaseEntity e) {
+		cacheChannel.evict(GlobalEx.CACHEREGION_ENTITY, entityClass.getSimpleName() + "-" + e.getId().toString());
+	}
+}
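A combined-condition sketch; the service bean is a placeholder inferred from the naming convention, and SearchRange (defined later in this commit) carries an inclusive start/end pair:

    // orders on platform 1 created inside [startDate, endDate], newest first
    List<ChargeOrder> orders = SpringContextHelper.getBean(ChargeOrderService.class)
            .findByMultiKey(
                    new String[] { "platformId", BaseEntity.CREATE_TIME },
                    new Object[] { 1, new SearchRange(startDate, endDate) },
                    Order.desc(BaseEntity.CREATE_TIME));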
+

+ 12 - 0
src/main/java/org/ccframe/commons/base/DisableCache.java

@@ -0,0 +1,12 @@
+package org.ccframe.commons.base;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Target({ElementType.TYPE})
+@Retention(RetentionPolicy.RUNTIME) // SOURCE retention would make the reflective getAnnotation() check in BaseService always miss
+public @interface DisableCache {
+	
+}
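Intended usage sketch: mark write-heavy entities whose getById calls should always go to the database. The entity below is hypothetical:

    @DisableCache   // log rows change too often to benefit from the entity cache
    @Entity
    @Table(name = "SYS_OPERATION_LOG")
    public class OperationLog extends BaseEntity {
        @Id
        @Column(name = "OPERATION_LOG_ID")
        private Integer operationLogId;

        @Override
        public Integer getId() {
            return operationLogId;
        }
    }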

+ 8 - 0
src/main/java/org/ccframe/commons/base/ICodeEnum.java

@@ -0,0 +1,8 @@
+package org.ccframe.commons.base;
+
+import java.util.List;
+
+public interface ICodeEnum {
+	String toCode();
+	List<ICodeEnum> valueList();
+}

+ 11 - 0
src/main/java/org/ccframe/commons/base/IHasSearchBuilder.java

@@ -0,0 +1,11 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+
+public interface IHasSearchBuilder<E extends Serializable> {
+
+	Integer getPriority();
+
+	void buildAllIndex(boolean forceDelete);
+
+}

+ 9 - 0
src/main/java/org/ccframe/commons/base/IProcessor.java

@@ -0,0 +1,9 @@
+package org.ccframe.commons.base;
+
+/**
+ * @author JIM
+ *
+ */
+public interface IProcessor {
+	void process();
+}

+ 116 - 0
src/main/java/org/ccframe/commons/base/OffsetBasedPageRequest.java

@@ -0,0 +1,116 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.springframework.data.domain.Pageable;
+import org.springframework.data.domain.Sort;
+import org.springframework.data.domain.Sort.Order;
+
+public class OffsetBasedPageRequest implements Pageable, Serializable {
+
+    private static final long serialVersionUID = 810579142489422257L;
+	private int limit;
+    private long offset;
+    private final Sort sort;
+
+    public OffsetBasedPageRequest(long offset, int limit, Order... orders) {
+    	this(offset, limit, Sort.by(orders));
+    }
+
+    /**
+     * Creates a new {@link OffsetBasedPageRequest} with sort parameters applied.
+     *
+     * @param offset zero-based offset.
+     * @param limit  the size of the elements to be returned.
+     * @param sort   can be {@literal null}.
+     */
+    public OffsetBasedPageRequest(long offset, int limit, Sort sort) {
+        if (offset < 0) {
+            throw new IllegalArgumentException("Offset index must not be less than zero!");
+        }
+
+        if (limit < 1) {
+            throw new IllegalArgumentException("Limit must not be less than one!");
+        }
+        this.limit = limit;
+        this.offset = offset;
+        this.sort = sort;
+    }
+
+    /**
+     * Creates a new {@link OffsetBasedPageRequest} with sort parameters applied.
+     *
+     * @param offset     zero-based offset.
+     * @param limit      the size of the elements to be returned.
+     * @param direction  the direction of the {@link Sort} to be specified, can be {@literal null}.
+     * @param properties the properties to sort by, must not be {@literal null} or empty.
+     */
+    public OffsetBasedPageRequest(int offset, int limit, Sort.Direction direction, String... properties) {
+        this(offset, limit, Sort.by(direction, properties));
+    }
+
+    @Override
+    public int getPageNumber() {
+        return (int)(offset / limit);
+    }
+
+    @Override
+    public int getPageSize() {
+        return limit;
+    }
+
+    @Override
+    public long getOffset() {
+        return offset;
+    }
+
+    @Override
+    public Sort getSort() {
+        return sort;
+    }
+
+    @Override
+    public Pageable next() {
+        return new OffsetBasedPageRequest(getOffset() + getPageSize(), getPageSize(), getSort());
+    }
+
+    public OffsetBasedPageRequest previous() {
+        return hasPrevious() ? new OffsetBasedPageRequest(getOffset() - getPageSize(), getPageSize(), getSort()) : this;
+    }
+
+
+    @Override
+    public Pageable previousOrFirst() {
+        return hasPrevious() ? previous() : first();
+    }
+
+    @Override
+    public Pageable first() {
+        return new OffsetBasedPageRequest(0, getPageSize(), getSort());
+    }
+
+    @Override
+    public boolean hasPrevious() {
+        return offset >= limit; // offset == limit is page 1, which does have a previous page
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+
+        if (!(o instanceof OffsetBasedPageRequest)) return false;
+
+        OffsetBasedPageRequest that = (OffsetBasedPageRequest) o;
+
+        return new EqualsBuilder()
+                .append(limit, that.limit)
+                .append(offset, that.offset)
+                .append(sort, that.sort)
+                .isEquals();
+                .isEquals();
+    }
+
+    @Override
+    public int hashCode() { // keep the equals/hashCode contract consistent
+        return new HashCodeBuilder().append(limit).append(offset).append(sort).toHashCode();
+    }
+
+}
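A usage sketch; any Spring Data repository accepts it wherever a Pageable is expected (the repository and entity names are illustrative):

    // rows 30..39, newest first: offset paging for infinite-scroll style clients
    Pageable page = new OffsetBasedPageRequest(30L, 10,
            Sort.by(Sort.Direction.DESC, BaseEntity.CREATE_TIME));
    Page<User> users = userRepository.findAll(page);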

+ 22 - 0
src/main/java/org/ccframe/commons/base/OperationLogMapper.java

@@ -0,0 +1,22 @@
+package org.ccframe.commons.base;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Marks a log field's display name and its code-to-text mapping for operation-log recording.
+ * @author Jim
+ *
+ */
+@Target({ ElementType.FIELD})
+@Inherited
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+public @interface OperationLogMapper {
+	String value();
+	String enumText() default "";
+}
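A field-level sketch; the field is hypothetical, and the enumText format is a guess since the commit does not show how it is consumed:

    @OperationLogMapper(value = "User status", enumText = "UserStatCodeEnum")
    private String userStatCode;   // logged under "User status", with the code translated through the enum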

+ 48 - 0
src/main/java/org/ccframe/commons/base/RedisIDGenerator.java

@@ -0,0 +1,48 @@
+package org.ccframe.commons.base;
+
+import java.io.Serializable;
+import java.util.Properties;
+
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.config.Global;
+import org.hibernate.MappingException;
+import org.hibernate.engine.spi.SharedSessionContractImplementor;
+import org.hibernate.id.Configurable;
+import org.hibernate.id.IdentityGenerator;
+import org.hibernate.service.ServiceRegistry;
+import org.hibernate.type.Type;
+import org.redisson.api.RAtomicLong;
+import org.redisson.api.RedissonClient;
+
+public class RedisIDGenerator extends IdentityGenerator implements Configurable{
+
+	private String name;
+	
+	private RAtomicLong atomicLong;
+	
+	private RAtomicLong getRAtomicLong() {
+		if(atomicLong == null) {
+			atomicLong = SpringContextHelper.getBean(RedissonClient.class).getAtomicLong(Global.REDIS_PERFIX + name);
+			if(atomicLong.get() == 0) {
+				atomicLong.getAndIncrement(); // start ids at 1 rather than 0
+			}
+		}
+		return atomicLong;
+	}
+
+	@Override
+	public void configure(Type type, Properties params, ServiceRegistry serviceRegistry) throws MappingException {
+		name = params.getProperty("GENERATOR_NAME");
+	}
+
+	@Override
+	public Serializable generate(SharedSessionContractImplementor session, Object obj) {
+		return (int) getRAtomicLong().getAndIncrement();
+	}
+	
+
+}
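A mapping sketch using standard Hibernate annotations. Hibernate passes the generator's own name into the params under the GENERATOR_NAME key, which configure() above reads; so naming the generator after the id field (assumed here to be the project's convention) makes the Redis counter key Global.REDIS_PERFIX + "userId":

    @Id
    @GeneratedValue(generator = "userId")
    @GenericGenerator(name = "userId", strategy = "org.ccframe.commons.base.RedisIDGenerator")
    @Column(name = "USER_ID")
    private Integer userId;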

+ 49 - 0
src/main/java/org/ccframe/commons/base/SearchRange.java

@@ -0,0 +1,49 @@
+package org.ccframe.commons.base;
+
+
+public class SearchRange {
+
+	String ISO_8601_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSSZZ"; // TODO: verify the current stack still accepts this range format; may be wrong since CCFRAME changed the persisted date format
+	
+	private Object start;
+	private Object end;
+	
+	public SearchRange(Object start, Object end) {
+		super();
+		this.start = start;
+		this.end = end;
+	}
+	public Object getStart() {
+		return start;
+	}
+	public void setStart(Object start) {
+		this.start = start;
+	}
+	public Object getEnd() {
+		return end;
+	}
+	public void setEnd(Object end) {
+		this.end = end;
+	}
+	public Object getStartSearchValue() {
+		//return UtilDateTime.convertDateToString((Date)start, ISO_8601_DATE_FORMAT);
+		return start; // Date values are currently passed through unformatted
+	}
+	public Object getEndSearchValue() {
+		//return UtilDateTime.convertDateToString((Date)end, ISO_8601_DATE_FORMAT);
+		return end; // Date values are currently passed through unformatted
+	}
+}

+ 20 - 0
src/main/java/org/ccframe/commons/base/TreeNodeTree.java

@@ -0,0 +1,20 @@
+package org.ccframe.commons.base;
+
+import java.util.List;
+
+import org.ccframe.subsys.core.domain.entity.TreeNode;
+
+public class TreeNodeTree extends TreeNode{
+	
+	private static final long serialVersionUID = 8849790533771490251L;
+
+	private List<TreeNodeTree> subNodeTree;
+	
+	public List<TreeNodeTree> getSubNodeTree() {
+		return subNodeTree;
+	}
+
+	public void setSubNodeTree(List<TreeNodeTree> subNodeTree) {
+		this.subNodeTree = subNodeTree;
+	}	
+}

+ 232 - 0
src/main/java/org/ccframe/commons/data/JExcelWriter.java

@@ -0,0 +1,232 @@
+package org.ccframe.commons.data;
+
+import org.apache.commons.jxpath.JXPathContext;
+import org.apache.commons.jxpath.JXPathNotFoundException;
+import org.apache.poi.hssf.usermodel.HSSFWorkbook;
+import org.apache.poi.poifs.filesystem.POIFSFileSystem;
+import org.apache.poi.ss.usermodel.*;
+import org.apache.poi.ss.util.CellRangeAddress;
+import org.apache.poi.xssf.usermodel.XSSFWorkbook;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Export helper that fills a template Excel file.
+ * Fill data is located by XPath expressions against the source object.
+ * Supports Excel header and footer sections.
+ * If background colors are not needed, prefer xls over xlsx for looped output; xlsx is roughly 5x slower.
+ *
+ * @author Jim
+ */
+public class JExcelWriter {
+
+    private Workbook templateWorkbook;
+    private static Pattern pattern = Pattern.compile("\\{\\=(.+?)\\}");
+    private boolean xls2007;
+
+    public JExcelWriter(String templateFile){
+        xls2007 = templateFile.toUpperCase().endsWith(".XLSX"); // filling via the xls format is recommended
+
+        try(FileInputStream fileInputStream = new FileInputStream(templateFile);){
+            templateWorkbook = xls2007 ? new XSSFWorkbook(fileInputStream) : new HSSFWorkbook(new POIFSFileSystem(fileInputStream));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void setOutputCellType(Cell templateCell, Cell outputCell, JXPathContext objectContext) {
+        switch (templateCell.getCellType()) {
+            case Cell.CELL_TYPE_FORMULA:
+                outputCell.setCellType(Cell.CELL_TYPE_FORMULA);
+                outputCell.setCellFormula(templateCell.getCellFormula());
+                break;
+            case Cell.CELL_TYPE_NUMERIC:
+                outputCell.setCellType(Cell.CELL_TYPE_NUMERIC);
+                outputCell.setCellValue(templateCell.getNumericCellValue());
+                break;
+            case Cell.CELL_TYPE_STRING:
+                String templateCellValue = templateCell.getStringCellValue();
+                Matcher matcher = pattern.matcher(templateCellValue);
+                Map<String, Object> replacerMap = new HashMap<String, Object>();
+                while (matcher.find()) {
+                    String xpath = matcher.group(1);
+                    Object valueObj = null;
+                    try {
+                        valueObj = objectContext.getValue(xpath);
+                    } catch (JXPathNotFoundException e) {
+                        //forget it
+                    }
+                    replacerMap.put(matcher.group(0), valueObj == null ? "" : valueObj);
+                }
+                if (replacerMap.size() == 1 && templateCellValue.equals(replacerMap.keySet().iterator().next())) {
+                    Object valueObj = replacerMap.values().iterator().next();
+                    if (valueObj instanceof Integer) {
+                        outputCell.setCellType(Cell.CELL_TYPE_NUMERIC);
+                        outputCell.setCellValue((Integer) valueObj);
+                    } else if (valueObj instanceof Double) {
+                        outputCell.setCellType(Cell.CELL_TYPE_NUMERIC);
+                        outputCell.setCellValue((Double) valueObj);
+                    } else {
+                        outputCell.setCellType(Cell.CELL_TYPE_STRING);
+                        outputCell.setCellValue(valueObj.toString());
+                    }
+                    break;
+                } else {
+                    for (Entry<String, Object> entry : replacerMap.entrySet()) {
+                        templateCellValue = templateCellValue.replace(entry.getKey(), entry.getValue().toString());
+                    }
+                }
+                outputCell.setCellType(Cell.CELL_TYPE_STRING);
+                outputCell.setCellValue(templateCellValue);
+                break;
+            default:
+                outputCell.setCellType(Cell.CELL_TYPE_BLANK);
+                outputCell.setCellValue("");
+                break;
+        }
+    }
+
+    public void fillToStream(Object object, OutputStream outStream) {
+    	fillTo(object, null, outStream, true);
+    }
+
+    public void fillToFile(Object object, String outFile) {
+    	fillTo(object, outFile, null, false);
+    }
+
+    private void fillTo(Object object, String outFile, OutputStream outStream, boolean streamMode) {
+        JXPathContext objectContext = JXPathContext.newContext(object);
+        try (
+			Workbook workbook = xls2007 ? new XSSFWorkbook(): new HSSFWorkbook();
+        ) {
+            Sheet outputSheet = workbook.createSheet();
+            Sheet templateSheet = ensureOpenSheet(templateWorkbook);
+
+            outputSheet.setDefaultColumnWidth(templateSheet.getDefaultColumnWidth());
+            outputSheet.setDefaultRowHeight(templateSheet.getDefaultRowHeight());
+            for (int i = 0; i < 100; i++) {
+                outputSheet.setColumnWidth(i, (int) (templateSheet.getColumnWidth(i) * 1.15));
+            }
+
+            int iterateRow = -1;
+            int iterateCount = 1; //the placeholder row takes one line; without looped output the row count shifts back by one
+
+            //copy row and cell data
+            for (int j = 0; j < templateSheet.getLastRowNum() + 1; j++) {
+				Row templateRow = templateSheet.getRow(j);
+
+				//check for a loop marker (a comment on the row's first cell)
+				Comment commentExpression = templateRow.getCell(0).getCellComment();
+				if(commentExpression != null){ //looped output
+					iterateRow = j;
+					iterateCount = 0; //reset before emitting loop rows
+					try {
+						Object iterateObj = objectContext.getValue(commentExpression.getString().getString());
+						if (iterateObj.getClass().isArray() || iterateObj instanceof Collection) {
+							List<Object> iterateDataList = (iterateObj instanceof Collection ? new ArrayList<Object>((Collection)iterateObj): Arrays.asList(iterateObj));
+
+							List<CellStyle> cellStyleCache = new ArrayList<>();
+							for(Object rowObj: iterateDataList){
+								JXPathContext rowObjectContext = JXPathContext.newContext(rowObj);
+
+								Row outputRow = outputSheet.createRow(j + iterateCount);
+								iterateCount ++;
+
+								outputRow.setHeight(templateRow.getHeight());
+								Iterator<Cell> cellIterator = templateRow.cellIterator();
+
+								for (int i = 0; cellIterator.hasNext(); i++) {
+									Cell outputCell = outputRow.createCell(i);
+									Cell templateCell = cellIterator.next();
+									if(i > cellStyleCache.size() - 1){
+                                        CellStyle style = workbook.createCellStyle();
+                                        style.cloneStyleFrom(templateCell.getCellStyle());
+                                        outputCell.setCellStyle(style);
+                                        cellStyleCache.add(style);
+                                    }else{
+                                        CellStyle style = cellStyleCache.get(i);
+                                        outputCell.setCellStyle(style);
+                                    }
+
+									setOutputCellType(templateCell, outputCell, rowObjectContext);
+								}
+							}
+						}
+					} catch (JXPathNotFoundException e) {
+						//unresolved xpath: skip the loop body
+					}
+				}else{ //direct (non-looped) output
+					Row outputRow = outputSheet.createRow(j - 1 + iterateCount);
+
+					outputRow.setHeight(templateRow.getHeight());
+					Iterator<Cell> cellIterator = templateRow.cellIterator();
+
+					for (int i = 0; cellIterator.hasNext(); i++) {
+						Cell templateCell = cellIterator.next();
+                        Cell outputCell = outputRow.createCell(templateCell.getColumnIndex());
+						CellStyle style = workbook.createCellStyle();
+						style.cloneStyleFrom(templateCell.getCellStyle());
+
+                        outputCell.setCellStyle(style);
+
+						setOutputCellType(templateCell, outputCell, objectContext);
+					}
+				}
+            }
+
+			//copy merged regions
+			for (int i = 0; i < templateSheet.getNumMergedRegions(); i++) {
+				CellRangeAddress region = templateSheet.getMergedRegion(i);
+				CellRangeAddress copyRegion = region.copy();
+				//position fix: shift past the loop body
+				if(iterateRow > -1){
+					//merged regions sitting on the loop row are dropped
+					if(copyRegion.getFirstRow() >= iterateRow && copyRegion.getLastRow() <= iterateRow){
+						continue;
+					}else{
+						if(copyRegion.getFirstRow() > iterateRow){
+							copyRegion.setFirstRow(copyRegion.getFirstRow() - 1 + iterateCount);
+							copyRegion.setLastRow(copyRegion.getLastRow() - 1 + iterateCount);
+						}
+					}
+				}
+				outputSheet.addMergedRegion(copyRegion);
+			}
+
+			if(streamMode) {
+				workbook.write(outStream);
+			}else {
+				File excelFile = new File(outFile);
+	            if (excelFile.getParentFile() != null && !excelFile.getParentFile().exists()) {
+	                excelFile.getParentFile().mkdirs();//NOSONAR
+	            }
+	            try (
+	                FileOutputStream fileOutputStream = new FileOutputStream(outFile);
+	            ) {
+	                workbook.write(fileOutputStream);
+	            }
+			}
+        } catch (IOException e) {
+            throw new RuntimeException(e); //fail fast, matching the constructor, instead of swallowing the error
+        }
+    }
+
+    protected Sheet ensureOpenSheet(Workbook workbook) {
+        int sheetIndex = workbook.getNumberOfSheets();
+        if (sheetIndex == 0) {
+            workbook.createSheet();
+        }
+        return workbook.getSheetAt(0);
+    }
+}
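
A minimal usage sketch for JExcelWriter, assuming a hypothetical template path and data bean: string cells in the template hold {=xpath} placeholders that JXPath resolves against the bean, and a comment on the first cell of a template row names the collection to iterate.

    // Hypothetical bean; JXPath resolves {=title} via getTitle(), and a cell
    // comment reading "rows" on the template row selects getRows() to loop over.
    public class ReportData {
        private final String title = "Monthly Report";
        private final List<Map<String, Object>> rows = new ArrayList<>();
        public String getTitle() { return title; }
        public List<Map<String, Object>> getRows() { return rows; }
    }

    JExcelWriter writer = new JExcelWriter("templates/report.xls"); // assumed path
    writer.fillToFile(new ReportData(), "out/report.xls");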

+ 141 - 0
src/main/java/org/ccframe/commons/dbunit/DBUnitExport.java

@@ -0,0 +1,141 @@
+package org.ccframe.commons.dbunit;
+
+import org.dbunit.DatabaseUnitException;
+import org.dbunit.ant.Export;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.DataSetException;
+import org.dbunit.dataset.IDataSet;
+import org.dbunit.dataset.ReplacementDataSet;
+import org.dbunit.dataset.csv.CsvDataSetWriter;
+import org.dbunit.dataset.excel.XlsDataSet;
+import org.dbunit.dataset.xml.FlatDtdDataSet;
+import org.dbunit.dataset.xml.XmlDataSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.ccframe.commons.util.FlatXmlWriterEx;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.sql.SQLException;
+import java.util.Arrays;
+
+
+/**
+ * Exports database data in a DBUnit-compatible format.
+ * @author Jim Wu
+ *
+ */
+public class DBUnitExport extends Export{
+
+    private static final Logger logger = LoggerFactory.getLogger(DBUnitExport.class);
+    private FlatXmlWriterEx datasetWriter;
+    
+    public DBUnitExport(){
+    	this.setOrdered(true);
+    }
+
+	@Override
+	protected IDataSet getExportDataSet(IDatabaseConnection connection)
+			throws DatabaseUnitException, SQLException {
+		ReplacementDataSet replacementDataSet = new ReplacementDataSet(super.getExportDataSet(connection));
+		replacementDataSet.addReplacementObject(null, "[null]");
+		return replacementDataSet;
+	}
+
+	@Override
+    public void execute(IDatabaseConnection connection) throws DatabaseUnitException{
+        logger.debug("execute(connection={}) - start", connection);
+
+        try
+        {
+            if (getDest() == null)
+            {
+                throw new DatabaseUnitException("'getDest()' is a required attribute of the <export> step.");
+            }
+
+            IDataSet dataset = getExportDataSet(connection);
+            logger.debug("dataset tables: " + Arrays.asList(dataset.getTableNames()), Project.MSG_VERBOSE);
+
+			
+            // Write the dataset
+            if (getFormat().equals(FORMAT_CSV))
+            {
+                CsvDataSetWriter.write(dataset, getDest());
+            }
+            else
+            {
+                OutputStream out = new FileOutputStream(getDest());
+                try
+                {
+                    if (getFormat().equalsIgnoreCase(FORMAT_FLAT))
+                    {
+                    	writeXML(dataset, out, getEncoding(), getDoctype());
+                    }
+                    else if (getFormat().equalsIgnoreCase(FORMAT_XML))
+                    {
+                        XmlDataSet.write(dataset, out, getEncoding());
+                    }
+                    else if (getFormat().equalsIgnoreCase(FORMAT_DTD))
+                    {
+                        //TODO Should DTD also support encoding? It is basically an XML file...
+                        FlatDtdDataSet.write(dataset, out);//, getEncoding());
+                    }
+                    else if (getFormat().equalsIgnoreCase(FORMAT_XLS))
+                    {
+                        XlsDataSet.write(dataset, out);
+                    }
+                    else
+                    {
+                        throw new IllegalArgumentException("The given format '"+getFormat()+"' is not supported.");
+                    }
+                    
+                }
+                finally
+                {
+                    out.close();
+                }
+            }
+            
+            logger.debug("Successfully wrote file '" + getDest() + "'", Project.MSG_INFO);
+            
+        }
+        catch (SQLException e)
+        {
+        	throw new DatabaseUnitException(e);
+        }
+        catch (IOException e)
+        {
+            throw new DatabaseUnitException(e);
+        }
+	}
+
+	private void writeXML(IDataSet dataset, OutputStream out, String encoding, String docType) throws UnsupportedEncodingException, DataSetException {
+		synchronized(this){
+			datasetWriter = new FlatXmlWriterEx(out, encoding);
+			datasetWriter.setDocType(docType);
+	        datasetWriter.write(dataset);
+	        datasetWriter = null;
+		}
+	}
+	
+	public int getTotalTableCount() {
+		return datasetWriter == null ? 0: datasetWriter.getTotalTableCount();
+	}
+
+	public int getProcessedTableCount() {
+		return datasetWriter == null ? 0: datasetWriter.getProcessedTableCount();
+	}
+
+	public String getProcessingTableName() {
+		return datasetWriter == null ? null: datasetWriter.getProcessingTableName();
+	}
+
+	public int getCurrentTableTotalCount() {
+		return datasetWriter == null ? 0: datasetWriter.getCurrentTableTotalCount();
+	}
+	
+}
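
A usage sketch, assuming an already-open IDatabaseConnection; setDest and setFormat are inherited from the DBUnit ant Export task, and the output path is hypothetical.

    DBUnitExport export = new DBUnitExport();           // exports in dependency order (setOrdered(true))
    export.setDest(new File("backup/full-export.xml")); // hypothetical output file
    export.setFormat("flat");                           // flat XML, written via FlatXmlWriterEx
    export.execute(connection);                         // connection: an open IDatabaseConnection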

+ 8 - 0
src/main/java/org/ccframe/commons/dbunit/DatabaseOperationEx.java

@@ -0,0 +1,8 @@
+package org.ccframe.commons.dbunit;
+
+import org.dbunit.operation.DatabaseOperation;
+import org.dbunit.operation.DeleteWhereOperation;
+
+public abstract class DatabaseOperationEx extends DatabaseOperation{
+    public static final DatabaseOperation DELETE_WHERE = new DeleteWhereOperation();
+}

+ 123 - 0
src/main/java/org/ccframe/commons/dbunit/InitFileReplacementProcessor.java

@@ -0,0 +1,123 @@
+package org.ccframe.commons.dbunit;
+
+import java.io.File;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.lang3.StringUtils;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.ITable;
+import org.redisson.api.RAtomicLong;
+import org.redisson.api.RedissonClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.util.DbUnitUtils.DBTYPE;
+import org.ccframe.config.Global;
+import org.ccframe.config.GlobalEx;
+
+/**
+ * File-initialization processor; a new instance is created for each XML file.
+ *
+ * File-system initialization logic:
+ * fetch the sequence from the system file-info table, write its values into the
+ * corresponding rows, then, once the seed data is in place, read back the primary
+ * keys and insert the file-info records.
+ *
+ * Directory separators inside $F{File:...} always use \ and are replaced automatically for Windows/Linux.
+ *
+ * @author JIM
+ *
+ */
+public class InitFileReplacementProcessor implements ReplacementProcessor {
+
+	private static final String FILE_START_DELIMITER = "$F{File:";
+	private static final String FILE_END_DELIMITER = "}F$";
+
+	private static List<InitImageSaveRow> initImageSaveRowList = new ArrayList<InitImageSaveRow>();
+
+	private static Integer increment;
+	
+	private static String initImageDir;
+
+	private Logger logger = LoggerFactory.getLogger(InitFileReplacementProcessor.class);
+	
+	private RAtomicLong atomicLong;
+	
+	public static String getInitImageDir() {
+		if(initImageDir == null) {
+			initImageDir = GlobalEx.APP_BASE_DIR + File.separator + GlobalEx.INIT_IMAGE_DIR + File.separator;
+		}
+		return initImageDir;
+	}
+
+	private RAtomicLong getRAtomicLong() {
+		if(atomicLong == null) {
+			atomicLong = SpringContextHelper.getBean(RedissonClient.class).getAtomicLong(Global.REDIS_PERFIX + "fileInfId");
+		}
+		return atomicLong;
+	}
+	
+	private int getIncrement() {
+		long next = getRAtomicLong().get();
+		if(next == 0) { // align the database ID to start at 1
+			atomicLong.getAndIncrement();
+			increment = 1;
+		}else {
+			if(increment == null) {
+				increment = (int) next;
+			}else {
+				increment ++;
+			}
+		}
+		return increment;
+	}
+
+	@Override
+	public Object replacementSubStrToObject(ITable table, String column, String substring, IDatabaseConnection connection, DBTYPE dbType){
+
+		File file = new File(getInitImageDir() + substring.replace('\\', File.separatorChar));
+		if(!file.exists()){
+			logger.error("找不到文件" + substring + ", ID 设置为 0");
+			return "0"; //文件ID不存在填充0
+		}
+		String tableName = table.getTableMetaData().getTableName();
+		Integer result = getIncrement();
+		initImageSaveRowList.add(new InitImageSaveRow(dbFieldNameToClassFieldName(tableName.substring(4), true), dbFieldNameToClassFieldName(column, false), result, substring));
+		return result;
+	}
+
+	@Override
+	public String getStartDelim() {
+		return FILE_START_DELIMITER;
+	}
+
+	@Override
+	public String getEndDelim() {
+		return FILE_END_DELIMITER;
+	}
+
+	public static List<InitImageSaveRow> getInitImageSaveRowList() {
+		return initImageSaveRowList;
+	}
+
+    /**
+     * Converts database naming to camel case, e.g. FILE_TYPE_NM -> fileTypeNm.
+     * @param dbFieldName
+     * @param isClass whether this is a table name; table names also capitalize the first letter
+     * @return
+     */
+    private String dbFieldNameToClassFieldName(String dbFieldName, boolean isClass){
+    	List<String> newFieldList = new ArrayList<String>();
+    	for(String str: dbFieldName.split("_")) {
+    		if(newFieldList.size() > 0 || isClass) {
+    			newFieldList.add(Character.toUpperCase(str.charAt(0)) + str.substring(1).toLowerCase());
+    		}else {
+    			newFieldList.add(str.toLowerCase());
+    		}
+    	}
+    	return StringUtils.join(newFieldList, "");
+    }
+
+}

+ 48 - 0
src/main/java/org/ccframe/commons/dbunit/InitImageSaveRow.java

@@ -0,0 +1,48 @@
+package org.ccframe.commons.dbunit;
+
+public class InitImageSaveRow {
+	
+	private String businessClassNm;
+	
+	private String columnNm;
+	
+	private Integer sysFileInfId;
+	
+	private String initImageNm;
+	
+	public InitImageSaveRow(String businessClassNm, String columnNm, Integer sysFileInfId, String initImageNm) {
+		this.businessClassNm = businessClassNm;
+		this.columnNm = columnNm;
+		this.sysFileInfId = sysFileInfId;
+		this.initImageNm = initImageNm;
+	}
+	public String getBusinessClassNm() {
+		return businessClassNm;
+	}
+	public void setBusinessClassNm(String businessClassNm) {
+		this.businessClassNm = businessClassNm;
+	}
+	public String getColumnNm() {
+		return columnNm;
+	}
+	public void setColumnNm(String columnNm) {
+		this.columnNm = columnNm;
+	}
+	public Integer getSysFileInfId() {
+		return sysFileInfId;
+	}
+	public void setSysFileInfId(Integer sysFileInfId) {
+		this.sysFileInfId = sysFileInfId;
+	}
+	public String getInitImageNm() {
+		return initImageNm;
+	}
+	public void setInitImageNm(String initImageNm) {
+		this.initImageNm = initImageNm;
+	}
+	@Override
+	public String toString() {
+		return "InitImageSaveRow [businessClassNm=" + businessClassNm + ", columnNm=" + columnNm + ", sysFileInfId="
+				+ sysFileInfId + ", initImageNm=" + initImageNm + "]";
+	}
+}

+ 158 - 0
src/main/java/org/ccframe/commons/dbunit/RandomReplacementProcessor.java

@@ -0,0 +1,158 @@
+package org.ccframe.commons.dbunit;
+
+import java.sql.SQLException;
+import java.util.Date;
+import java.util.Random;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang.math.RandomUtils;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.ITable;
+import org.jboss.logging.Logger;
+
+import org.ccframe.commons.util.DbUnitUtils.DBTYPE;
+import org.ccframe.commons.util.UtilDateTime;
+
+/**
+ * Random data generator that fills columns according to the declared field type.
+ *
+ * Expression format: type marker + dynamic flag (- or +) + length + required flag (*); since < must be escaped in XML, - stands in for it.
+ * Type markers: S=string I=integer L=long T=time B=binary Y=boolean E=enum D=double
+ * 
+ * For integers the length limits the number of digits; for times it limits the random distance from now, in days.
+ * 
+ * A required string of 1 to 16 random characters with random length: S-16*
+ * A nullable integer of 1 to 5 digits: I-5
+ * A required 11-digit long: L11*
+ * A required time within 30 days before or after now: T30*
+ * A time exactly 3 days before now: T-3* (offsets truncate to the day's 00:00:00, with no time-of-day part)
+ * A time exactly 4 days after now: T+4*
+ * The current time: T0*
+ * A required binary value of 10 random characters: B10*
+ * 
+ * @author JIM
+ *
+ */
+public class RandomReplacementProcessor implements ReplacementProcessor {
+
+	private static final String FILE_START_DELIMITER = "$R{";
+	private static final String FILE_END_DELIMITER = "}R$";
+
+//	private Logger logger = Logger.getLogger(RandomReplacementProcessor.class);
+	
+	private static Pattern extractPattern = Pattern.compile("([SILTBYED])([-+]{1})?(\\d+)?(\\*)?"); //E and D included so enum/double expressions also match
+	
+	@Override
+	public Object replacementSubStrToObject(ITable table, String column, String substring, IDatabaseConnection connection, DBTYPE dbType){
+		Matcher matcher = extractPattern.matcher(substring);
+		if(!matcher.find()) {
+			return null;
+		}
+		String mode = matcher.group(1);
+		Boolean growFlag = (matcher.group(2) == null ? null: "+".equals(matcher.group(2)));
+		Integer maxLength = (matcher.group(3) == null ? null: Integer.parseInt(matcher.group(3)));
+		boolean notNull = (matcher.group(4) != null);
+		
+		switch(mode.charAt(0)) {
+			case 'S': //string
+				return randomString(growFlag, maxLength, notNull);
+			case 'I': //integer
+				return randomInteger(growFlag, maxLength, notNull);
+			case 'L':
+				return randomLong(growFlag, maxLength, notNull);
+			case 'T':
+				return randomTime(growFlag, maxLength, notNull);
+			case 'B':
+				return randomByteString(growFlag, maxLength, notNull);
+			case 'Y':
+				return randomBooleanString(notNull);
+			case 'E':
+				return randomEnumString(notNull);
+			case 'D':
+				return randomDouble(maxLength, notNull);
+			default:
+				return null;
+		}
+	}
+
+	private static final boolean randomNull() { //when nullable, NULL or an empty string appears with 1/10 probability
+		return RandomUtils.nextInt(10) == 0;
+	}
+	
+	private static final String ALLCHAR = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+	private String randomString(Boolean growFlag, Integer maxLength, boolean notNull) {
+		
+		if(!notNull && randomNull()) {
+			return "";
+		}
+
+		if(maxLength == null){
+			maxLength = 16;
+		}
+
+		StringBuilder sb = new StringBuilder();
+
+		int length = maxLength - 1;
+		if (growFlag != null) {
+			length = RandomUtils.nextInt(maxLength);
+		}
+		for (int i = 0; i <= length; i++) {
+			sb.append(ALLCHAR.charAt(RandomUtils.nextInt(ALLCHAR.length())));
+		}
+		return sb.toString();
+	}
+
+	private Integer randomInteger(Boolean growFlag, Integer maxLength, boolean notNull) {
+		return 0; //stub: random integers are not implemented yet
+	}
+
+	private Integer randomLong(Boolean growFlag, Integer maxLength, boolean notNull) {
+		return 0; //stub: random longs are not implemented yet
+	}
+
+	private static final Random LONG_RANDOM = new Random();
+	private static final Date NOW_TIME = new Date(); //time at system initialization
+	private Date randomTime(Boolean growFlag, Integer maxLength, boolean notNull) {
+		if(!notNull && randomNull()) {
+			return null;
+		}
+		if(maxLength == null || maxLength == 0) {
+			return NOW_TIME;
+		}
+		if(growFlag == null) { //random time within the range
+			long grow = LONG_RANDOM.nextLong() % (maxLength*24L*3600L*1000L); //random offset within the day range
+			return new Date(NOW_TIME.getTime() - grow);
+		}else { //fixed day offset
+			return UtilDateTime.addDays(UtilDateTime.getDayStartTime(NOW_TIME), growFlag ? maxLength: -maxLength);
+		}
+	}
+
+	private byte[] randomByteString(Boolean growFlag, Integer maxLength, boolean notNull) {
+		String value = randomString(growFlag, maxLength, notNull);
+		return value == null ? null : value.getBytes();
+	}
+
+	private String randomBooleanString(boolean notNull) {
+		return null; //stub
+	}
+
+	private String randomEnumString(boolean notNull) {
+		return null; //stub
+	}
+
+	private Double randomDouble(Integer maxLength, boolean notNull) { //doubles always use a grow-style range
+		return null; //stub
+	}
+
+	@Override
+	public String getStartDelim() {
+		return FILE_START_DELIMITER;
+	}
+
+	@Override
+	public String getEndDelim() {
+		return FILE_END_DELIMITER;
+	}
+}
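
For reference, a self-contained sketch of how an expression such as S-16* decomposes under the extraction pattern above (with the character class widened to cover E and D):

    Pattern p = Pattern.compile("([SILTBYED])([-+]{1})?(\\d+)?(\\*)?");
    Matcher m = p.matcher("S-16*");
    if (m.find()) {
        System.out.println(m.group(1)); // "S"  -> string type
        System.out.println(m.group(2)); // "-"  -> dynamic (random) length
        System.out.println(m.group(3)); // "16" -> length limit
        System.out.println(m.group(4)); // "*"  -> required, never null
    }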

+ 105 - 0
src/main/java/org/ccframe/commons/dbunit/ReplacementDataSet.java

@@ -0,0 +1,105 @@
+package org.ccframe.commons.dbunit;
+
+import org.ccframe.commons.util.DbUnitUtils;
+import org.ccframe.commons.util.DbUnitUtils.DBTYPE;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class ReplacementDataSet extends AbstractDataSet {
+
+    /**
+     * Logger for this class
+     */
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReplacementDataSet.class);
+
+    private final IDataSet dataSet;
+    private IDatabaseConnection connection;
+    private DBTYPE dbType;
+    private List<ReplacementProcessor> processorList = new ArrayList<ReplacementProcessor>();
+
+    private static int tableId = 0;
+    
+    public ReplacementDataSet(IDataSet dataSet, IDatabaseConnection connection, DBTYPE dbType, List<ReplacementProcessor> processorList)
+    {
+        this.dataSet = dataSet;
+        this.connection = connection;
+        this.dbType = dbType;
+        this.processorList.addAll(processorList); 
+    }
+
+    private ReplacementTable createReplacementTable(ITable table)
+    {
+    	tableId ++;
+        return  new ReplacementTable(table, tableId, connection, dbType, processorList);
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // AbstractDataSet class
+
+    protected ITableIterator createIterator(boolean reversed)
+            throws DataSetException
+    {
+        return new ReplacementIterator(reversed ?
+                dataSet.reverseIterator() : dataSet.iterator());
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // IDataSet interface
+
+    public String[] getTableNames() throws DataSetException
+    {
+        LOGGER.debug("getTableNames() - start");
+
+        return dataSet.getTableNames();
+    }
+
+    public ITableMetaData getTableMetaData(String tableName)
+            throws DataSetException
+    {
+        return dataSet.getTableMetaData(tableName);
+    }
+
+    public ITable getTable(String tableName) throws DataSetException
+    {
+        return createReplacementTable(dataSet.getTable(tableName));
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    // ReplacementIterator class
+
+    private class ReplacementIterator implements ITableIterator
+    {
+
+//        private final Logger logger = LoggerFactory.getLogger(ReplacementIterator.class);
+
+        private final ITableIterator iterator;
+
+        public ReplacementIterator(ITableIterator iterator)
+        {
+            this.iterator = iterator;
+        }
+
+        ////////////////////////////////////////////////////////////////////////
+        // ITableIterator interface
+
+        public boolean next() throws DataSetException
+        {
+            return iterator.next();
+        }
+
+        public ITableMetaData getTableMetaData() throws DataSetException
+        {
+            return iterator.getTableMetaData();
+        }
+
+        public ITable getTable() throws DataSetException
+        {
+            return createReplacementTable(iterator.getTable());
+        }
+    }
+}

+ 14 - 0
src/main/java/org/ccframe/commons/dbunit/ReplacementProcessor.java

@@ -0,0 +1,14 @@
+package org.ccframe.commons.dbunit;
+
+import org.ccframe.commons.util.DbUnitUtils;
+import org.ccframe.commons.util.DbUnitUtils.DBTYPE;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.ITable;
+
+import java.sql.SQLException;
+
+public interface ReplacementProcessor {
+	Object replacementSubStrToObject(ITable table, String column,  String string, IDatabaseConnection connection, DBTYPE dbType);
+	String getStartDelim();
+	String getEndDelim();
+}

+ 141 - 0
src/main/java/org/ccframe/commons/dbunit/ReplacementTable.java

@@ -0,0 +1,141 @@
+package org.ccframe.commons.dbunit;
+
+import java.sql.SQLException;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.util.DbUnitUtils;
+import org.ccframe.commons.util.DbUnitUtils.DBTYPE;
+import org.ccframe.config.Global;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.DataSetException;
+import org.dbunit.dataset.ITable;
+import org.dbunit.dataset.ITableMetaData;
+import org.redisson.api.RAtomicLong;
+import org.redisson.api.RedissonClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.CaseFormat;
+
+public class ReplacementTable implements ITable{
+
+    /**
+     * Logger for this class
+     */
+    private static final Logger LOGGER = LoggerFactory.getLogger(ReplacementTable.class);
+    
+    private static final int OBJECT_CACHE_SIZE = 50; //cache up to 50 fields
+
+    private final ITable table;
+    private IDatabaseConnection connection;
+    private DBTYPE dbType;
+    private List<ReplacementProcessor> processorList;
+    /**
+     * Each distinct table switch gets a new tableId, combined with the row number to mark a unique XML position; e.g. the SYS_USER table in XML1 and XML2 gets different tableIds.
+     */
+    private int tableId;
+    private String pkId;
+    private RAtomicLong atomicLong;
+
+	private RAtomicLong getAtomicLong() {
+		if(atomicLong == null) {
+			atomicLong = SpringContextHelper.getBean(RedissonClient.class).getAtomicLong(Global.REDIS_PERFIX + CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, this.pkId));
+			if(atomicLong.get() == 0) {
+				atomicLong.getAndIncrement();
+			}
+		}
+		return atomicLong;
+	}
+
+	private Map<String, Object> getRowObjectCache = new LinkedHashMap<String, Object>(5){
+		private static final long serialVersionUID = 1L;
+		@Override
+		protected boolean removeEldestEntry(Map.Entry<String, Object> eldest) {
+		    // evict the eldest (least-recently-accessed) entry once the size exceeds the maximum
+		    return size() > OBJECT_CACHE_SIZE;
+		}
+	};
+
+	private Map<String, Object> getRowIdCache = new LinkedHashMap<String, Object>(5){
+		private static final long serialVersionUID = 1L;
+		@Override
+		protected boolean removeEldestEntry(Map.Entry<String, Object> eldest) {
+		    // evict the eldest (least-recently-accessed) entry once the size exceeds the maximum
+		    return size() > OBJECT_CACHE_SIZE;
+		}
+	};
+
+    public ReplacementTable(ITable table, int tableId, IDatabaseConnection connection, DBTYPE dbType, List<ReplacementProcessor> processorList)
+    {
+        this.table = table;
+        this.connection = connection;
+        this.dbType = dbType;
+        this.processorList = processorList;
+        this.tableId = tableId;
+        this.pkId = table.getTableMetaData().getTableName();
+        this.pkId = this.pkId.substring(this.pkId.indexOf("_") + 1) + "_ID";
+    }
+
+    ////////////////////////////////////////////////////////////////////////
+    // ITable interface
+
+    public ITableMetaData getTableMetaData()
+    {
+        return table.getTableMetaData();
+    }
+
+    public int getRowCount()
+    {
+        return table.getRowCount();
+    }
+
+    public Object getValue(int row, String column) throws DataSetException{
+		//note: called twice per cell -- first for ignore-mapping, then for the actual value -- so cache by table/row/column to avoid running a replacement twice
+    	String value = (String)table.getValue(row, column);
+
+    	if(pkId.equals(column)) { //primary key
+        	if(value == null) {
+        		Object objectCached = getRowIdCache.get(tableId + "-" + row);
+        		if(objectCached == null) {
+        			objectCached = (int) getAtomicLong().getAndIncrement();
+        			getRowIdCache.put(tableId + "-" + row, objectCached);
+        		}
+        		return objectCached;
+        	} else { //sync the ID sequence
+        		long next = getAtomicLong().get();
+        		int dbId = Integer.parseInt(value); //the id must be an Integer
+        		if(next <= dbId) {
+        			getAtomicLong().set(dbId + 1);
+        		}
+        		return dbId;
+        	}
+    	}else { //non-primary-key column
+            if (value == null){
+            	return null;
+            }
+            for(ReplacementProcessor processor: processorList){
+                if (processor.getStartDelim() != null && processor.getEndDelim() != null && value.startsWith(processor.getStartDelim()) && value.endsWith(processor.getEndDelim())){
+                	Object objectCached = getRowObjectCache.get(tableId + "-" + row + "-" + column);
+                	if(objectCached == null) {
+                		objectCached = processor.replacementSubStrToObject(table, column, value.substring(processor.getStartDelim().length(), value.length() - processor.getEndDelim().length()), connection, dbType); 
+                    	getRowObjectCache.put(tableId + "-" + row + "-" + column, objectCached);
+                	}
+    				return objectCached;
+                }
+            }
+            return value;
+    	}
+    }
+
+    public String toString()
+    {
+    	StringBuilder sb = new StringBuilder();
+    	sb.append(getClass().getName()).append("[table=").append(table).append("]");
+    	return sb.toString();
+    }
+}

+ 67 - 0
src/main/java/org/ccframe/commons/filter/CcRequestLoggingFilter.java

@@ -0,0 +1,67 @@
+package org.ccframe.commons.filter;
+
+import javax.servlet.annotation.WebFilter;
+import javax.servlet.http.HttpServletRequest;
+
+import org.springframework.web.filter.AbstractRequestLoggingFilter;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Buffers each request into a ThreadLocal; when an exception occurs, BusinessExceptionResolver triggers logging of the full request details.
+ * @author JIM
+ *
+ */
+@WebFilter(filterName = "requestLogging",urlPatterns = {"/admin/*", "/api/*"})
+@Slf4j
+public class CcRequestLoggingFilter extends AbstractRequestLoggingFilter {
+
+	static final ThreadLocal<Boolean> shouldLog = new ThreadLocal<Boolean>(); 
+	
+	public CcRequestLoggingFilter() {
+		this.setAfterMessagePrefix("客户端请求:[");
+	}
+
+	@Override
+	protected boolean isIncludeHeaders() {
+		return true;
+	}
+
+	@Override
+	protected boolean isIncludeQueryString() {
+		return true;
+	}
+	
+	@Override
+	protected boolean isIncludePayload(){
+		return true;
+	}
+	
+	@Override
+	protected int getMaxPayloadLength() {
+		return 2000;
+	}
+
+	public static void pendingLog() {
+		shouldLog.set(true);
+	}
+
+	@Override
+	protected boolean shouldLog(HttpServletRequest request) {
+		return true; //always buffer: every request is cached so the ThreadLocal flag can decide later
+	}
+
+	@Override
+	protected void beforeRequest(HttpServletRequest request, String message) {
+		//nothing
+	}
+
+	@Override
+	protected void afterRequest(HttpServletRequest request, String message) {
+		if(Boolean.TRUE.equals(shouldLog.get()) && request.getContentType() != null && !request.getContentType().startsWith("multipart/form-data")) { //log only non-binary requests
+			log.error(message);
+			shouldLog.remove();
+		}
+	}
+
+}
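
A sketch of the intended call site, per the class comment: an exception resolver (BusinessExceptionResolver in this codebase) flags the thread, and afterRequest() then emits the buffered payload. The resolver body below is an assumption.

    public ModelAndView resolveException(HttpServletRequest request,
            HttpServletResponse response, Object handler, Exception ex) {
        CcRequestLoggingFilter.pendingLog(); // the next afterRequest() on this thread logs the request
        return null; // hypothetical: let other resolvers render the error
    }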

+ 47 - 0
src/main/java/org/ccframe/commons/helper/EntityOperationListener.java

@@ -0,0 +1,47 @@
+package org.ccframe.commons.helper;
+
+import java.util.Date;
+
+import javax.persistence.PrePersist;
+import javax.persistence.PreRemove;
+import javax.persistence.PreUpdate;
+
+import org.ccframe.commons.auth.TokenUser;
+import org.ccframe.commons.base.BaseEntity;
+import org.ccframe.commons.util.CcTransactionUtil;
+import org.springframework.security.core.Authentication;
+import org.springframework.security.core.context.SecurityContextHolder;
+
+public class EntityOperationListener {
+
+	@PrePersist
+	protected void onCreate(BaseEntity baseEntity) {
+		Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
+		if(authentication != null) { //web context
+			TokenUser tokenUser = (TokenUser)authentication.getPrincipal(); //TODO: handle permissions
+			baseEntity.setCreateUserId(tokenUser.getUserId());
+		}
+		Date now = new Date();
+		baseEntity.setCreateTime(now);
+		baseEntity.setUpdateTime(now);
+
+		CcTransactionUtil.pushSave(baseEntity);
+	}
+
+	@PreUpdate
+	protected void onUpdate(BaseEntity baseEntity) {
+		Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
+		if(authentication != null) { //web context
+			TokenUser tokenUser = (TokenUser)authentication.getPrincipal(); //TODO: handle permissions
+			baseEntity.setUpdateUserId(tokenUser.getUserId());
+		}
+		baseEntity.setUpdateTime(new Date());
+		
+		CcTransactionUtil.pushSave(baseEntity);
+	}
+	
+	@PreRemove
+	protected void onRemove(BaseEntity baseEntity) {
+		CcTransactionUtil.pushDelete(baseEntity);
+	}
+}

+ 25 - 0
src/main/java/org/ccframe/commons/helper/SpringContextHelper.java

@@ -0,0 +1,25 @@
+package org.ccframe.commons.helper;
+
+import java.util.Map;
+
+import org.springframework.stereotype.Component;
+
+import net.oschina.j2cache.cache.support.util.SpringUtil;
+
+/**
+ * Extends OSChina's j2cache SpringUtil with additional lookups.
+ * @author Jim
+ *
+ */
+@Component
+public class SpringContextHelper extends SpringUtil {
+
+    public static boolean containsBean(String beanName) {
+    	return getApplicationContext().containsBean(beanName);
+    }
+
+    public static <T> Map<String, T> getBeansOfType(Class<T> requiredType){
+    	return getApplicationContext().getBeansOfType(requiredType);
+    }
+   
+}
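
A small usage sketch; getBean(Class) comes from the inherited SpringUtil and is used the same way elsewhere in this commit, while the bean name checked below is an assumption.

    FileInfService fileInfService = SpringContextHelper.getBean(FileInfService.class);
    if (SpringContextHelper.containsBean("quartzService")) { // hypothetical bean name
        Map<String, IHasSearchBuilder> builders =
                SpringContextHelper.getBeansOfType(IHasSearchBuilder.class);
    }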

+ 335 - 0
src/main/java/org/ccframe/commons/helper/SysInitBeanHelper.java

@@ -0,0 +1,335 @@
+package org.ccframe.commons.helper;
+
+import java.io.File;
+import java.net.URL;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.annotation.PostConstruct;
+import javax.sql.DataSource;
+
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.ApplicationListener;
+import org.springframework.context.event.ContextRefreshedEvent;
+import org.springframework.core.convert.support.GenericConversionService;
+import org.springframework.scheduling.quartz.SchedulerFactoryBean;
+import org.springframework.stereotype.Component;
+import org.springframework.web.bind.support.ConfigurableWebBindingInitializer;
+import org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter;
+
+import com.google.common.base.CaseFormat;
+
+import org.ccframe.app.App;
+import org.ccframe.commons.base.IHasSearchBuilder;
+import org.ccframe.commons.dbunit.InitFileReplacementProcessor;
+import org.ccframe.commons.mvc.StringToDateConverter;
+import org.ccframe.commons.util.DbUnitUtils;
+import org.ccframe.config.Global;
+import org.ccframe.config.GlobalEx;
+import org.ccframe.subsys.core.queue.SmallPictQueueServer;
+import org.ccframe.subsys.core.service.FileInfService;
+import org.ccframe.subsys.core.service.QuartzService;
+import org.redisson.api.RAtomicLong;
+import org.redisson.api.RedissonClient;
+
+import net.oschina.j2cache.CacheChannel;
+
+
+/**
+ * System initialization.
+ *
+ * @author JIM
+ */
+@Component
+public class SysInitBeanHelper implements ApplicationListener<ContextRefreshedEvent>{
+
+	@Autowired
+	private DataSource dataSource;
+
+    @Value("${app.productName:ccframe}")
+	private String productName;
+	
+	private String initMode;
+	
+    @Value("${app.init.baseDataPath:}")
+    private String baseDataPath;
+    
+    @Value("${app.init.productDataPath:}") //默认逗号分隔
+    private String[] productDataPaths;
+
+    @Value("${app.init.loadAndDeleteDataPath:}")
+    private String loadAndDeleteDataPath;
+
+    @Value("${app.dataSource.schema:}")
+    private String schema;
+
+    @Value("${app.init.qserver}")
+	private boolean qserver;
+
+    @Value("${app.search.embedded}")
+	private boolean embedded;
+
+    private DbUnitUtils.DBTYPE dbType = DbUnitUtils.DBTYPE.MYSQL;
+
+    private Map<String, String> replacementMap;
+    
+	private Logger log = LoggerFactory.getLogger(this.getClass());
+
+	@Value("${app.debug.autoLoginUserId:0}")
+	public Integer autoLoginUserId;
+
+	@Value("${app.debug.autoLoginShopId:0}")
+	public Integer autoLoginShopId;
+
+	public static boolean inited = false;
+	
+    @Value("${app.dataSource.hibernateDialect}")
+    public void setDialect(String dialect) {
+    	if("org.hibernate.dialect.Oracle10gDialect".equals(dialect)){
+    		dbType = DbUnitUtils.DBTYPE.ORACLE10;
+    	}
+    	if("org.hibernate.dialect.MySQL5InnoDBDialect".equals(dialect)){
+    		dbType = DbUnitUtils.DBTYPE.MYSQL;
+    	}
+    	if("org.hibernate.dialect.OracleDialect".equals(dialect)){
+    		dbType = DbUnitUtils.DBTYPE.ORACLE;
+    	}
+    	if("org.hibernate.dialect.H2Dialect".equals(dialect)){
+    		dbType = DbUnitUtils.DBTYPE.H2;
+    	}
+    	if("org.hibernate.dialect.SQLServerDialect".equals(dialect)){
+    		dbType = DbUnitUtils.DBTYPE.MSSQL;
+    	}
+    }
+
+    @Value("${app.init.mode:none}")
+    public void setInitMode(String initMode){
+    	String forceInitMode = System.getProperty("forceInitMode", null); //自动发布时,通过环境变量-D强制指定是重置还是不处理
+    	this.initMode = (forceInitMode == null ? initMode : forceInitMode);
+    }
+    
+	@Autowired
+	private CacheChannel cacheChannel;
+
+	@Autowired
+	private RedissonClient redissonClient;
+
+    @Autowired
+    public void setReplacementMap(Map<String, String> replacementMap){
+    	this.replacementMap = replacementMap;
+    }
+
+	@Autowired
+	private RequestMappingHandlerAdapter handlerAdapter;
+
+	@Autowired
+	private QuartzService quartzService;
+	
+	/**
+	 * Registers the custom StringToDateConverter with Spring so that date parameters submitted by the front end bind correctly.
+	 */
+	@PostConstruct //runs once at startup, after the Spring container has initialized this bean
+	public void initEditableValidation() {
+		ConfigurableWebBindingInitializer initializer = (ConfigurableWebBindingInitializer) handlerAdapter.getWebBindingInitializer();
+		if (initializer.getConversionService() != null) {
+			GenericConversionService genericConversionService = (GenericConversionService) initializer.getConversionService();
+			genericConversionService.addConverter(new StringToDateConverter());
+		}
+	}
+
+	@Autowired
+	private SchedulerFactoryBean schedulerFactoryBean;
+
+	@Override
+	public void onApplicationEvent(ContextRefreshedEvent event) { //the only hook that guarantees all spring beans are fully initialized; InitializingBean only covers this bean and its explicit injections
+		if (event.getApplicationContext().getParent() == null) {
+			Date contextInitEndTime = new Date();
+			long milliseconds = contextInitEndTime.getTime() - App.getContextInitStartTime().getTime();
+	    	log.info("容器初始化耗时:"+ milliseconds / 1000 + "." + milliseconds % 1000 + " s");
+	    	log.info(productName + "开始启动...");
+	    	if(qserver) { //只有queue server有权限重建数据/索引
+	    		if("create".equals(initMode)) {
+			        createTables();
+			        loadOnceData();
+			        clearCache();
+			        syncRedisSequence(); //sync the ID sequences in the cache (avoids errors after a cache rebuild)
+			        initFileInf();
+			        buildIndex(!embedded); //only the queue server deletes and rebuilds indexes; test nodes skip the delete, since a fresh create clears the directory
+	    		}else {
+	    			loadOnceData(); //patch data needs a manual index rebuild and may cause image-patch conflicts (avoid image-producing business here)
+			        initFileInf();
+			        syncRedisSequence(); //sync the ID sequences in the cache (avoids errors after a cache rebuild)
+	    		}
+		        quartzService.initJob("create".equals(initMode)); //rebuild or incrementally update scheduled jobs
+	    	}
+	        inited = true;
+	    	log.info(productName + "启动完毕...");
+	    	milliseconds = new Date().getTime() - contextInitEndTime.getTime();
+	    	log.info(productName + "启动耗时:"+ milliseconds / 1000 + "." + milliseconds % 1000 + " s");
+		}
+	}
+
+	private void clearCache() {
+    	log.info("清理缓存...");
+		cacheChannel.clear(GlobalEx.CACHEREGION_DEFAULT);
+		cacheChannel.clear(GlobalEx.CACHEREGION_ENTITY);
+		cacheChannel.clear(GlobalEx.CACHEREGION_FOREVER);
+		cacheChannel.clear(GlobalEx.CACHEREGION_QUERY);
+	}
+	
+    /**
+     * Initializes system-table and business-table data.
+     *
+     * @throws java.sql.SQLException
+     * @throws org.dbunit.DatabaseUnitException
+     *
+     * @throws Exception
+     */
+    private void createTables() {
+        if ("create".equals(initMode)) { //TODO use the hibernate constant instead of "create"
+       		log.info("full re-create mode: loading initialization data automatically...");
+            try {
+            	List<String> dataPaths = new ArrayList<String>();
+            	if(productDataPaths != null){
+                    for (String productDataPath : productDataPaths) {
+                        dataPaths.add(productDataPath);
+                    }
+            	}
+            	log.info("载入框架初始数据...");
+                DbUnitUtils.appendDbUnitData(dataSource, schema, dbType, replacementMap, new String[] {baseDataPath});
+            	log.info("载入项目初始数据...");
+            	Collections.reverse(dataPaths);
+                DbUnitUtils.appendDbUnitData(dataSource, schema, dbType, replacementMap, dataPaths.toArray(new String[dataPaths.size()]));
+            	log.info("自动装入初始化数据结束...");
+            } catch (Exception e) { //NOSONAR
+                log.error("load init data failed.", e);
+            }
+        }
+    }
+
+    private void syncRedisSequence() {
+    	if (!"create".equals(initMode)) {
+        	try {
+        		try( //connection is now closed by try-with-resources (it previously leaked)
+    				Connection connection = dataSource.getConnection();
+    				ResultSet tables = connection.getMetaData().getTables(null, null, "%", null);
+				){
+					while(tables.next()) {
+						String tableName = tables.getString("TABLE_NAME");
+						String tableId = tableName.substring(tableName.indexOf("_") + 1) + "_ID";
+						try {
+							// check whether redis already holds the sequence; only query the max value when it does not
+							String redisKeyName = CaseFormat.UPPER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, tableId);
+							RAtomicLong atomicLong = redissonClient.getAtomicLong(Global.REDIS_PERFIX + redisKeyName);
+							long next = atomicLong.get();
+							if(next == 0) { //not initialized yet
+								try(
+									PreparedStatement preparedStatement = connection.prepareStatement("SELECT MAX(" + tableId + ") FROM " + tableName, new String[] {tableId});
+									ResultSet result = preparedStatement.executeQuery();
+								){
+									if(result.next()) {
+										atomicLong.set(result.getInt(1) + 1); //initial value
+									}
+								}
+							}
+						}catch (SQLException e) { //keep going: some tables simply do not follow the ID naming convention
+							log.warn("Init table {} redis sequence failed, Reason: {}", tableName, e.getMessage());
+						}
+					}
+        		}
+			} catch (SQLException e) {
+				throw new RuntimeException(e);
+			}
+        }
+    }
+    
+    private void loadOnceData(){
+    	if(StringUtils.isNotBlank(loadAndDeleteDataPath)){
+            try {
+            	File file = new File(GlobalEx.APP_BASE_DIR + File.separator + loadAndDeleteDataPath);
+            	if(file.exists()){
+                	log.info("发现数据补丁,装入并删除补丁数据 " + loadAndDeleteDataPath + "...");
+					DbUnitUtils.appendDbUnitData(dataSource, schema, dbType, replacementMap, new String[]{loadAndDeleteDataPath});
+	            	if(file.delete()){
+	                    log.info("success load and delete customer data file: " + file.getPath());
+	            	}
+	            	log.info("装入并自动删除成功...");
+            	}else{
+                    log.warn("补丁数据 " + loadAndDeleteDataPath + " 未发现,跳过.");
+            	}
+			} catch (Exception e) { //NOSONAR
+                log.error("加载补丁数据失败.", e);
+			}
+    	}
+    }
+
+	@SuppressWarnings("rawtypes")
+	public void buildIndex(boolean forceDelete){
+    	log.info("开始重建索引...");
+		Map<String, IHasSearchBuilder> searchBuilderMap = SpringContextHelper.getBeansOfType(IHasSearchBuilder.class);
+		List<IHasSearchBuilder> searchBuilderList = new ArrayList<IHasSearchBuilder>(searchBuilderMap.values());
+		searchBuilderList.sort(new Comparator<IHasSearchBuilder>() {
+			public int compare(IHasSearchBuilder lhs, IHasSearchBuilder rhs) {
+				return lhs.getPriority().compareTo(rhs.getPriority()); //with only a few async worker threads configured, indexes build in priority order
+			}
+		});
+//		searchBuilderList.sort((lhs,rhs) -> lhs.getPriority().compareTo(rhs.getPriority())); //jetty8 conflicts with lambda expressions
+		for(IHasSearchBuilder searchBuilder: searchBuilderList){
+			searchBuilder.buildAllIndex(forceDelete); //builds the index asynchronously
+		}
+	}
+
+	private void initFileInf() {
+        if (!InitFileReplacementProcessor.getInitImageSaveRowList().isEmpty()) {
+	    	log.info("处理预置图片资源...");
+	    	SpringContextHelper.getBean(FileInfService.class).processInitFile();
+        }
+	}
+	
+	public Boolean isAdminAutoLogin() {
+		return autoLoginUserId == null ? false : (autoLoginUserId > 0);
+	}
+
+}

+ 47 - 0
src/main/java/org/ccframe/commons/helper/ValidateCodeHelper.java

@@ -0,0 +1,47 @@
+package org.ccframe.commons.helper;
+
+import java.util.concurrent.TimeUnit;
+
+import org.ccframe.config.GlobalEx;
+import org.redisson.api.RMapCache;
+import org.redisson.api.RedissonClient;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+/**
+ * Stores page/SMS validation codes; entries expire after 20 minutes.
+ * @author Administrator
+ *
+ */
+@Component
+public class ValidateCodeHelper {
+	
+	@Autowired
+	private RedissonClient redissonClient;
+	
+	public boolean putCode(String key, String code) {
+		if(!SysInitBeanHelper.inited) {
+			return false;
+		}
+		RMapCache<String, String> validateCodeMap = redissonClient.getMapCache(GlobalEx.VALIDATE_CODE_MAP);
+		validateCodeMap.put(key, code, GlobalEx.VALIDATE_CODE_EXPIRE_MINUTE, TimeUnit.MINUTES);
+		validateCodeMap.clearExpireAsync();
+		return true;
+	}
+
+	public String getCode(String key) {
+		if(!SysInitBeanHelper.inited) {
+			return null;
+		}
+		RMapCache<String, String> validateCodeMap = redissonClient.getMapCache(GlobalEx.VALIDATE_CODE_MAP);
+		return validateCodeMap.get(key);
+	}
+
+	public String expireCode(String key) {
+		if(!SysInitBeanHelper.inited) {
+			return null;
+		}
+		RMapCache<String, String> validateCodeMap = redissonClient.getMapCache(GlobalEx.VALIDATE_CODE_MAP);
+		return validateCodeMap.remove(key);
+	}
+}
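
A sketch of the send/verify round trip; the key scheme and code value are assumptions.

    validateCodeHelper.putCode("sms:13800000000", "824913"); // at send time
    String expected = validateCodeHelper.getCode("sms:13800000000");
    if (expected != null && expected.equals(submittedCode)) {
        validateCodeHelper.expireCode("sms:13800000000");    // consume on success
    }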

+ 54 - 0
src/main/java/org/ccframe/commons/helper/ValueLockHelper.java

@@ -0,0 +1,54 @@
+package org.ccframe.commons.helper;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * A lock pool keyed by equals() comparison.
+ * Provides mutual exclusion per value, e.g. when one ID is edited by several users at once, or when different business operations on a table must not run concurrently.
+ * 
+ * Supports simple wrapper values such as Integer and String.
+ * @author JIM
+ *
+ * @param <T>
+ * @deprecated unreliable in clustered environments; prefer RLock
+ */
+public class ValueLockHelper<T> {
+
+	private Map<T, ValueWrapper<T>> valueLockMap;
+	
+	private static final int DEFAULT_SIZE = 200;
+	
+	public ValueLockHelper(){
+		this(DEFAULT_SIZE);
+	}
+
+	public ValueLockHelper(final int cacheSize){
+
+		valueLockMap = new LinkedHashMap<T, ValueWrapper<T>>(cacheSize){
+
+			private static final long serialVersionUID = 2082087443457277997L;
+
+			@Override
+			protected boolean removeEldestEntry(Map.Entry<T, ValueWrapper<T>> eldest) {
+				// evict the eldest (least-recently-accessed) entry once the size exceeds the maximum
+				return size() > cacheSize;
+			}
+		};
+	}
+	
+	public synchronized ValueWrapper<T> getValueLock(T value) {
+		if(value == null) {
+			throw new IllegalArgumentException("value cannot be null");
+		}
+		ValueWrapper<T> savedValue = valueLockMap.get(value);
+		if(savedValue == null) {
+			ValueWrapper<T> valueWrapper = new ValueWrapper<T>();
+			valueWrapper.setValue(value);
+			valueLockMap.put(value, valueWrapper);
+			savedValue = valueWrapper;
+		}
+		return savedValue;
+	}
+
+}
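
A usage sketch for single-JVM, per-ID mutual exclusion; the operation is hypothetical. Note that the LRU pool can evict a wrapper, so two threads racing on a long-unused ID may obtain different locks, which is part of why the class is deprecated in favor of RLock.

    private static final ValueLockHelper<Integer> USER_LOCK = new ValueLockHelper<>();

    public void rechargeUser(Integer userId) { // hypothetical business operation
        synchronized (USER_LOCK.getValueLock(userId)) {
            // load-modify-save for this userId runs exclusively here
        }
    }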

+ 20 - 0
src/main/java/org/ccframe/commons/helper/ValueWrapper.java

@@ -0,0 +1,20 @@
+package org.ccframe.commons.helper;
+
+import java.io.Serializable;
+
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.NoArgsConstructor;
+import lombok.Setter;
+
+@Getter
+@Setter
+@NoArgsConstructor
+@AllArgsConstructor
+public class ValueWrapper<T> implements Serializable{
+	
+	private static final long serialVersionUID = 3705825667199351080L;
+
+	private T value;
+	
+}

+ 50 - 0
src/main/java/org/ccframe/commons/jpaquery/Criteria.java

@@ -0,0 +1,50 @@
+package org.ccframe.commons.jpaquery;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
+import org.springframework.data.jpa.domain.Specification;
+
+/** 
+ * A container for query conditions.
+ * @author Jim
+ */  
+public class Criteria<T> implements Specification<T>{
+	private static final long serialVersionUID = 7723262888497747481L;
+
+	private List<Criterion> criterionList = new ArrayList<Criterion>();
+	
+    public Criteria(){}
+	
+	public Predicate toPredicate(Root<T> root, CriteriaQuery<?> query, CriteriaBuilder builder) {  
+		if (!criterionList.isEmpty()) {
+            List<Predicate> predicates = new ArrayList<Predicate>();  
+            for(Criterion c : criterionList){  
+                predicates.add(c.toPredicate(root, query,builder));  
+            }  
+            // join all conditions with AND
+            if (predicates.size() > 0) {  
+                return builder.and(predicates.toArray(new Predicate[predicates.size()]));  
+            }  
+        }  
+        return builder.conjunction();  
+    }
+
+	/** 
+     * Adds a simple condition expression.
+     */  
+    public Criteria<T> add(Criterion criterion){  
+        if(criterion!=null){  
+            criterionList.add(criterion);  
+        }
+        return this;
+    }
+}  

+ 17 - 0
src/main/java/org/ccframe/commons/jpaquery/Criterion.java

@@ -0,0 +1,17 @@
+package org.ccframe.commons.jpaquery;
+
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
+/**
+ * Condition interface, used to supply condition expressions.
+ */
+public interface Criterion {
+	public enum Operator {
+		EQ, NE, LIKE, GT, LT, GTE, LTE, AND, OR
+	}
+
+	public Predicate toPredicate(Root<?> root, CriteriaQuery<?> query, CriteriaBuilder builder);
+}

+ 39 - 0
src/main/java/org/ccframe/commons/jpaquery/LogicalExpression.java

@@ -0,0 +1,39 @@
+package org.ccframe.commons.jpaquery;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
+/**
+ * Logical condition expression, for complex conditions such as an OR query where a single property matches multiple values.
+ * 
+ * @author lee
+ * 
+ */
+public class LogicalExpression implements Criterion {
+	private Criterion[] criterion; // expressions contained in this logical expression
+	private Operator operator; // operator
+
+	public LogicalExpression(Criterion[] criterions, Operator operator) {
+		this.criterion = criterions;
+		this.operator = operator;
+	}
+
+	public Predicate toPredicate(Root<?> root, CriteriaQuery<?> query, CriteriaBuilder builder) {
+		List<Predicate> predicates = new ArrayList<Predicate>();
+		for (int i = 0; i < this.criterion.length; i++) {
+			predicates.add(this.criterion[i].toPredicate(root, query, builder));
+		}
+		switch (operator) {
+		case AND: //previously fell through to null, breaking Restrictions.and()
+			return builder.and(predicates.toArray(new Predicate[predicates.size()]));
+		case OR:
+			return builder.or(predicates.toArray(new Predicate[predicates.size()]));
+		default:
+			return null;
+		}
+
+}

+ 182 - 0
src/main/java/org/ccframe/commons/jpaquery/Restrictions.java

@@ -0,0 +1,182 @@
+package org.ccframe.commons.jpaquery;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.springframework.util.ObjectUtils;
+
+import org.ccframe.commons.jpaquery.Criterion.Operator;
+
+/**
+ * Condition builder for creating condition expressions.
+ */
+public class Restrictions {
+
+	/**
+	 * Equals.
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression eq(String fieldName, Object value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.EQ);
+	}
+
+	/**
+	 * Not equals.
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression ne(String fieldName, Object value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.NE);
+	}
+
+	/**
+	 * Fuzzy match (LIKE).
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression like(String fieldName, String value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.LIKE);
+	}
+
+	//disabled overload: like(String fieldName, String value, MatchMode matchMode)
+
+	/**
+	 * Greater than.
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression gt(String fieldName, Object value) {
+		if (value == null){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.GT);
+	}
+
+	/**
+	 * Less than.
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression lt(String fieldName, Object value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.LT);
+	}
+
+	/**
+	 * Less than or equal. (The javadoc, method name, and operator were mutually inconsistent; lte now maps to LTE.)
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression lte(String fieldName, Object value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.LTE);
+	}
+
+	/**
+	 * Greater than or equal. (gte now maps to GTE; it previously returned LTE.)
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return
+	 */
+	public static SimpleExpression gte(String fieldName, Object value) {
+		if (ObjectUtils.isEmpty(value)){
+			return null;
+		}
+		return new SimpleExpression(fieldName, value, Operator.GTE);
+	}
+
+	/**
+	 * Logical AND.
+	 * 
+	 * @param criterions
+	 * @return
+	 */
+	public static LogicalExpression and(Criterion... criterions) {
+		return new LogicalExpression(criterions, Operator.AND);
+	}
+
+	/**
+	 * Logical OR.
+	 * 
+	 * @param criterions
+	 * @return
+	 */
+	public static LogicalExpression or(Criterion... criterions) {
+		return new LogicalExpression(criterions, Operator.OR);
+	}
+
+	/**
+	 * IN, implemented as OR underneath; suitable for IN queries over a small number of values
+	 * 
+	 * @param fieldName
+	 * @param values
+	 * @return
+	 */
+	public static LogicalExpression in(String fieldName, Object[] values) {
+		return in(fieldName, Arrays.asList(values));
+	}
+
+	/**
+	 * IN, implemented as OR underneath; suitable for IN queries over a small number of values
+	 * 
+	 * @param fieldName
+	 * @param value
+	 * @return null when the collection is null or empty
+	 */
+	@SuppressWarnings("rawtypes")
+	public static LogicalExpression in(String fieldName, Collection value) {
+		if (value == null || value.isEmpty()) {
+			return null;
+		}
+		SimpleExpression[] ses = new SimpleExpression[value.size()];
+		int i = 0;
+		for (Object obj : value) {
+			ses[i] = new SimpleExpression(fieldName, obj, Operator.EQ);
+			i++;
+		}
+		return new LogicalExpression(ses, Operator.OR);
+	}
+}
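
Usage sketch, not part of the commit: the Criterion objects compose naturally into a Spring Data JPA Specification. The repository, entity, and field names below are hypothetical; note that the factory methods return null for empty values, so null criteria must be skipped. Nested paths such as "dept.deptName" also work, because SimpleExpression splits the field name on dots.

import java.util.ArrayList;
import java.util.List;
import javax.persistence.criteria.Predicate;
import org.springframework.data.jpa.domain.Specification;
import org.ccframe.commons.jpaquery.Criterion;
import org.ccframe.commons.jpaquery.Restrictions;

public final class Specifications {
	// Combine criteria with AND, skipping the nulls returned for empty values.
	public static <T> Specification<T> allOf(Criterion... criteria) {
		return (root, query, builder) -> {
			List<Predicate> predicates = new ArrayList<>();
			for (Criterion c : criteria) {
				if (c != null) {
					predicates.add(c.toPredicate(root, query, builder));
				}
			}
			return builder.and(predicates.toArray(new Predicate[0]));
		};
	}
}

// e.g. userRepository.findAll(Specifications.allOf(
//         Restrictions.eq("userStat", "ACTIVE"),
//         Restrictions.or(Restrictions.like("userName", text),
//                         Restrictions.like("userMobile", text))));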

+ 75 - 0
src/main/java/org/ccframe/commons/jpaquery/SimpleExpression.java

@@ -0,0 +1,75 @@
+package org.ccframe.commons.jpaquery;
+
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Path;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
+import org.apache.commons.lang3.StringUtils;
+
+/**
+ * Simple condition expression
+ * 
+ * @author lee
+ * 
+ */
+public class SimpleExpression implements Criterion {
+
+	private String fieldName; // field name
+	private Object value; // comparison value
+	private Operator operator; // comparison operator
+
+	protected SimpleExpression(String fieldName, Object value, Operator operator) {
+		this.fieldName = fieldName;
+		this.value = value;
+		this.operator = operator;
+	}
+
+	public String getFieldName() {
+		return fieldName;
+	}
+
+	public Object getValue() {
+		return value;
+	}
+
+	public Operator getOperator() {
+		return operator;
+	}
+
+	@SuppressWarnings({ "rawtypes", "unchecked" })
+	public Predicate toPredicate(Root<?> root, CriteriaQuery<?> query, CriteriaBuilder builder) {
+		Path expression = null;
+		if (fieldName.contains(".")) {
+			String[] names = StringUtils.split(fieldName, ".");
+			expression = root.get(names[0]);
+			for (int i = 1; i < names.length; i++) {
+				expression = expression.get(names[i]);
+			}
+		} else {
+			expression = root.get(fieldName);
+		}
+
+		switch (operator) {
+		case EQ:
+			return builder.equal(expression, value);
+		case NE:
+			return builder.notEqual(expression, value);
+		case LIKE:
+			return builder.like((Expression<String>) expression, "%" + value + "%");
+		case LT:
+			return builder.lessThan(expression, (Comparable) value);
+		case GT:
+			return builder.greaterThan(expression, (Comparable) value);
+		case LTE:
+			return builder.lessThanOrEqualTo(expression, (Comparable) value);
+		case GTE:
+			return builder.greaterThanOrEqualTo(expression, (Comparable) value);
+		default:
+			return null;
+		}
+	}
+
+}

+ 28 - 0
src/main/java/org/ccframe/commons/mvc/CcUserArgumentResolver.java

@@ -0,0 +1,28 @@
+package org.ccframe.commons.mvc;
+
+import org.ccframe.commons.auth.TokenUser;
+import org.springframework.core.MethodParameter;
+import org.springframework.security.core.context.SecurityContextHolder;
+import org.springframework.web.bind.support.WebDataBinderFactory;
+import org.springframework.web.context.request.NativeWebRequest;
+import org.springframework.web.method.support.HandlerMethodArgumentResolver;
+import org.springframework.web.method.support.ModelAndViewContainer;
+
+/**
+ * Resolves TokenUser controller method arguments
+ * @author JIM
+ *
+ */
+public class CcUserArgumentResolver implements HandlerMethodArgumentResolver{
+
+	@Override
+	public boolean supportsParameter(MethodParameter parameter) {
+		return parameter.getParameterType().equals(TokenUser.class);
+	}
+
+	@Override
+	public Object resolveArgument(MethodParameter parameter, ModelAndViewContainer mavContainer, NativeWebRequest webRequest, WebDataBinderFactory binderFactory) throws Exception {
+		return SecurityContextHolder.getContext().getAuthentication().getPrincipal(); //under the api and admin paths, return the user parsed by the JWT filter directly
+	}
+	
+}
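
Registration sketch, not part of the commit, assuming a standard Spring WebMvcConfigurer and a JWT filter that has already placed the principal into the SecurityContext (the config class name is illustrative):

import java.util.List;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.method.support.HandlerMethodArgumentResolver;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class ArgumentResolverConfig implements WebMvcConfigurer {
	@Override
	public void addArgumentResolvers(List<HandlerMethodArgumentResolver> resolvers) {
		resolvers.add(new CcUserArgumentResolver());
	}
}

// A controller can then declare the current user directly:
// @GetMapping("/api/me")
// public TokenUser me(TokenUser tokenUser) { return tokenUser; }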

+ 45 - 0
src/main/java/org/ccframe/commons/mvc/CcframeTransactionManager.java

@@ -0,0 +1,45 @@
+package org.ccframe.commons.mvc;
+
+import org.springframework.orm.jpa.JpaTransactionManager;
+import org.springframework.transaction.TransactionDefinition;
+import org.springframework.transaction.support.DefaultTransactionStatus;
+
+import org.ccframe.commons.util.CcTransactionUtil;
+
+/**
+ * Synchronizes cache and index operations with the database transaction.
+ * All operation requests are queued when JPA entities are persisted and only take effect/commit when the transaction completes.
+ * Inside a write transaction, therefore, do not read through the search service; query the database directly instead.
+ * @author JIM
+ */
+public class CcframeTransactionManager extends JpaTransactionManager {
+
+	/**
+	 * 
+	 */
+	private static final long serialVersionUID = -3878501009638970644L;
+
+	@Override
+	protected void doBegin(Object transaction, TransactionDefinition definition) {
+		super.doBegin(transaction, definition);
+		if(!definition.isReadOnly()){ //read-only transactions never touch the index
+			CcTransactionUtil.init();
+		}
+	}
+
+	@Override
+	protected void doCommit(DefaultTransactionStatus status) {
+		super.doCommit(status);
+		if(!status.isReadOnly()){ //read-only transactions never touch the index
+			CcTransactionUtil.commit(); //note this pattern: if a transaction writes table A, avoid reading table A via searchService inside the same transaction, because the data only takes effect on commit; use the database service instead
+		}
+	}
+
+	@Override
+	protected void doRollback(DefaultTransactionStatus status) {
+		super.doRollback(status); //perform the actual JPA rollback first
+		if(!status.isReadOnly()){ //read-only transactions never touch the index
+			CcTransactionUtil.rollback();
+		}
+	}
+
+}
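
Wiring sketch, not part of the commit, assuming a standard JPA setup (bean and config names are illustrative): the customized manager simply replaces the default JpaTransactionManager so queued index/cache work follows the transaction outcome.

import javax.persistence.EntityManagerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.PlatformTransactionManager;

@Configuration
public class TransactionConfig {
	@Bean
	public PlatformTransactionManager transactionManager(EntityManagerFactory emf) {
		CcframeTransactionManager txManager = new CcframeTransactionManager();
		txManager.setEntityManagerFactory(emf); // standard JpaTransactionManager property
		return txManager;
	}
}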

+ 97 - 0
src/main/java/org/ccframe/commons/mvc/ClientPage.java

@@ -0,0 +1,97 @@
+package org.ccframe.commons.mvc;
+
+import java.io.Serializable;
+import java.util.List;
+
+/**
+ * 
+ * 1. Compatible with the GXT response data format.
+ * 2. Compatible with the SDK development mode for outputting page data.
+ * 3. Follows the spring-data paging property names, so copyProperties works directly.
+ * 4. Tied to the output of the paging tagLib component.
+ * 
+ * @author Jim
+ * 
+ * @deprecated use Spring's own Page abstraction instead
+ */
+public class ClientPage<E> implements Serializable{
+	
+	/**
+	 * 
+	 */
+	private static final long serialVersionUID = -8803959390342016899L;
+
+	private List<E> list;
+
+	/**
+	 * Total record count.
+	 */
+	private int totalLength;
+	
+	/**
+	 * Current page number, zero-based
+	 */
+	private int page;
+	
+	/**
+	 * Records per page
+	 */
+	private int size;
+	
+	public ClientPage(){}
+
+	public ClientPage(int totalLength, int page, int size, List<E> list){
+		this.totalLength = totalLength;
+		this.page = page;
+		this.size = size;
+		this.list = list;
+	}
+	
+
+	public List<E> getList() {
+		return list;
+	}
+
+	public void setList(List<E> list) {
+		this.list = list;
+	}
+
+	public int getTotalLength() {
+		return totalLength;
+	}
+
+	public void setTotalLength(int totalLength) {
+		this.totalLength = totalLength;
+	}
+
+	public int getPage() {
+		return page;
+	}
+
+	public void setPage(int page) {
+		this.page = page;
+	}
+
+	public int getSize() {
+		return size;
+	}
+
+	public void setSize(int size) {
+		this.size = size;
+	}
+
+	/**
+	 * @return total number of pages
+	 */
+	public int getTotalPages(){
+		return totalLength <= 1 ? 1 : ((totalLength - 1) / size + 1);
+	}
+	
+	/**
+	 * @return offset of the first record on this page
+	 */
+	public int getOffset(){
+		return page * size;
+	}
+
+}

+ 17 - 0
src/main/java/org/ccframe/commons/mvc/QueryObject.java

@@ -0,0 +1,17 @@
+package org.ccframe.commons.mvc;
+
+import lombok.Getter;
+import lombok.Setter;
+import lombok.ToString;
+
+/**
+ * Common query object; extend it with the parameters you need
+ * @author Jim
+ *
+ */
+@Getter
+@Setter
+@ToString
+public class QueryObject {
+	private String searchText;
+}

+ 24 - 0
src/main/java/org/ccframe/commons/mvc/StringToDateConverter.java

@@ -0,0 +1,24 @@
+package org.ccframe.commons.mvc;
+
+import java.util.Date;
+
+import org.apache.commons.lang3.StringUtils;
+import org.springframework.core.convert.converter.Converter;
+
+import org.ccframe.commons.util.UtilDateTime;
+
+public class StringToDateConverter implements Converter<String, Date> {
+ 
+	@Override
+	public Date convert(String source) {
+		if (StringUtils.isBlank(source)) {
+			return null;
+		}
+		source = source.trim();
+		if(source.contains(":")) { //date with a time-of-day part
+			return UtilDateTime.convertStringToDateTime(source);
+		}else {
+			return UtilDateTime.convertStringToDate(source);
+		}
+	}
+}
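
Registration sketch, not part of the commit (config class name is illustrative): once registered, @RequestParam Date parameters and form-bound Date fields accept both date-only and date-time strings, with the exact patterns delegated to UtilDateTime.

import org.springframework.context.annotation.Configuration;
import org.springframework.format.FormatterRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

@Configuration
public class ConverterConfig implements WebMvcConfigurer {
	@Override
	public void addFormatters(FormatterRegistry registry) {
		registry.addConverter(new StringToDateConverter()); // applies to all String -> Date binding
	}
}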

+ 57 - 0
src/main/java/org/ccframe/commons/quartz/BaseQuartzJob.java

@@ -0,0 +1,57 @@
+package org.ccframe.commons.quartz;
+
+import org.quartz.DisallowConcurrentExecution;
+import org.quartz.Job;
+import org.quartz.JobExecutionContext;
+import org.quartz.JobExecutionException;
+import org.redisson.api.RLock;
+import org.redisson.api.RedissonClient;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.ccframe.commons.helper.SysInitBeanHelper;
+
+/**
+ * Base class for ordinary jobs operating on system data.
+ * @author JIM
+ *
+ */
+public abstract class BaseQuartzJob implements Job, Runnable{
+
+	private static final String CONCURRENT_LOCK = "ConcurrentLock";
+	
+	@Autowired
+	private RedissonClient redissonClient;
+
+	private String lockJobClass;
+	
+	public BaseQuartzJob() {
+		DisallowConcurrentExecution disallowConcurrentExecution = getClass().getAnnotation(DisallowConcurrentExecution.class);
+		if(disallowConcurrentExecution != null) {
+			lockJobClass = getClass().getSimpleName() + CONCURRENT_LOCK;
+		}
+	}
+
+	@Override
+	public void execute(JobExecutionContext context) throws JobExecutionException {
+
+		if(SysInitBeanHelper.inited) {
+			//when concurrent execution is disallowed, a cluster-wide lock is required, otherwise other nodes can still run in parallel
+			if(lockJobClass != null) {
+				RLock lockJob = redissonClient.getLock(lockJobClass);
+				if(lockJob.tryLock()) { //if the lock is already held, free this thread for other work
+					try {
+						this.run();
+					}catch(Throwable tr) {
+						throw new JobExecutionException("Something awful happened", tr, false);
+					}finally {
+						lockJob.unlock();
+					}
+				}
+			}else {
+				this.run();
+			}
+		}else {
+//			throw new JobExecutionException("Data not ready", false);
+			//NO OP
+		}
+	}
+}
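
Minimal job sketch, not part of the commit (class name and body are hypothetical). With @DisallowConcurrentExecution present, BaseQuartzJob wraps run() in a Redisson lock so at most one cluster node executes the job at a time; without the annotation, run() executes directly.

import org.quartz.DisallowConcurrentExecution;
import lombok.extern.slf4j.Slf4j;

@Slf4j
@DisallowConcurrentExecution // triggers the cluster-wide Redisson lock in BaseQuartzJob
public class TokenCleanupJob extends BaseQuartzJob {

	@Override
	public void run() {
		log.info("cleaning expired tokens"); // actual business logic goes here
	}
}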

+ 72 - 0
src/main/java/org/ccframe/commons/quartz/JobMonitorListener.java

@@ -0,0 +1,72 @@
+package org.ccframe.commons.quartz;
+
+import org.quartz.JobDetail;
+import org.quartz.JobExecutionContext;
+import org.quartz.JobExecutionException;
+import org.quartz.JobKey;
+import org.quartz.Trigger;
+import org.quartz.listeners.JobListenerSupport;
+
+import com.alibaba.fastjson.JSON;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Job listener that logs execution status
+ *
+ * @author xingnana
+ * @create 2020-01-17
+ */
+@Slf4j
+public class JobMonitorListener extends JobListenerSupport {
+ 
+    /**
+     * Returns the name of this JobListener
+     * @return
+     */
+    @Override
+    public String getName() {
+        String name = getClass().getSimpleName();
+//        log.info("job listener name: {}", name);
+        return name;
+    }
+ 
+    /**
+     * Invoked before the Job is executed
+     * @param jobExecutionContext
+     */
+    @Override
+    public void jobToBeExecuted(JobExecutionContext jobExecutionContext) {
+        JobDetail jobDetail=jobExecutionContext.getJobDetail();
+        JobKey jobKey=jobDetail.getKey();
+        Trigger trigger=jobExecutionContext.getTrigger();
+        log.debug("executing scheduled job: {}.{}",jobKey.getGroup(),jobKey.getName());
+//        log.info("job about to execute, jobName: {}, jobGroup: {}, jobDetail: {}, trigger: {}",jobKey.getName(),jobKey.getGroup(), JSON.toJSONString(jobDetail), JSON.toJSONString(trigger));
+    }
+ 
+    /**
+     * Invoked when Job execution is vetoed
+     * @param jobExecutionContext
+     */
+    @Override
+    public void jobExecutionVetoed(JobExecutionContext jobExecutionContext) {
+        JobDetail jobDetail=jobExecutionContext.getJobDetail();
+        JobKey jobKey=jobDetail.getKey();
+        log.warn("job vetoed by a TriggerListener, jobName: {}, jobGroup: {}, jobDetail: {}",jobKey.getName(),jobKey.getGroup(), JSON.toJSONString(jobDetail));
+    }
+ 
+    /**
+     * Invoked after the Job has executed
+     * @param jobExecutionContext
+     * @param e
+     */
+    @Override
+    public void jobWasExecuted(JobExecutionContext jobExecutionContext, JobExecutionException e) {
+        JobDetail jobDetail=jobExecutionContext.getJobDetail();
+        JobKey jobKey=jobDetail.getKey();
+        log.debug("scheduled job finished: {}.{}",jobKey.getGroup(),jobKey.getName());
+//        log.info("job finished, jobName: {}, jobGroup: {}, jobDetail: {}",jobKey.getName(),jobKey.getGroup(), JSON.toJSONString(jobDetail));
+    }
+ 
+ 
+}

+ 31 - 0
src/main/java/org/ccframe/commons/quartz/QuartzJobFactory.java

@@ -0,0 +1,31 @@
+package org.ccframe.commons.quartz;
+
+import org.quartz.spi.TriggerFiredBundle;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.config.AutowireCapableBeanFactory;
+import org.springframework.scheduling.quartz.AdaptableJobFactory;
+
+/**
+ * Job factory that autowires Runnable job instances
+ */
+public class QuartzJobFactory extends AdaptableJobFactory {
+ 
+    //AutowireCapableBeanFactory can inject dependencies into an existing object from the Spring IoC container
+    @Autowired
+    private AutowireCapableBeanFactory autowireCapableBeanFactory;
+ 
+    /**
+     *
+     * Wires the instantiated job object through the Spring container so its dependencies
+     * are injected; otherwise @Autowired fields stay null and cause NullPointerExceptions
+     * @param bundle
+     * @return
+     * @throws Exception
+     */
+    @Override
+    protected Object createJobInstance(TriggerFiredBundle bundle) throws Exception {
+        Object jobInstance=super.createJobInstance(bundle);
+        this.autowireCapableBeanFactory.autowireBean(jobInstance);
+        return jobInstance;
+    }
+
+}
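
Wiring sketch, not part of the commit, assuming a plain SchedulerFactoryBean (the project's own QuartzConfiguration may differ): handing the factory to the scheduler ensures @Autowired fields inside job instances, such as the RedissonClient in BaseQuartzJob, are populated before execute() runs.

import org.quartz.spi.JobFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.quartz.SchedulerFactoryBean;

@Configuration
public class SchedulerConfig {

	@Bean
	public JobFactory jobFactory() {
		return new QuartzJobFactory(); // itself a Spring-managed bean, so its @Autowired field is filled
	}

	@Bean
	public SchedulerFactoryBean schedulerFactoryBean(JobFactory jobFactory) {
		SchedulerFactoryBean factoryBean = new SchedulerFactoryBean();
		factoryBean.setJobFactory(jobFactory);
		return factoryBean;
	}
}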

+ 42 - 0
src/main/java/org/ccframe/commons/quartz/Scheduled.java

@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.ccframe.commons.quartz;
+
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+/**
+ * Handling strategy: in create mode, all timers are created and started proactively.
+ * In standard mode, existing scheduled jobs are compared and any new job is registered in paused state, so it can be started manually once every server in the cluster has been updated.
+ * @author JIM
+ *
+ */
+@Target({ TYPE })
+@Retention(RUNTIME)
+@Documented
+public @interface Scheduled
+{
+	String cron() default "";
+
+	long fixedRate() default -1;
+}
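
Usage sketch, not part of the commit; the scanner that registers annotated jobs lives elsewhere in the project, and the job class here is hypothetical:

@Scheduled(cron = "0 0 2 * * ?") // every day at 02:00
public class NightlyReportJob extends BaseQuartzJob {
	@Override
	public void run() {
		// build the nightly report
	}
}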

+ 43 - 0
src/main/java/org/ccframe/commons/queue/QueueClient.java

@@ -0,0 +1,43 @@
+package org.ccframe.commons.queue;
+
+import org.redisson.api.RBoundedBlockingQueue;
+import org.redisson.api.RedissonClient;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import org.ccframe.commons.helper.ValueWrapper;
+import org.ccframe.config.GlobalEx;
+
+public abstract class QueueClient<T> implements InitializingBean {
+
+	private RBoundedBlockingQueue<ValueWrapper<T>> queue;
+	
+	@Autowired
+	private RedissonClient redissonClient;
+
+	public void offer(T t){
+		offer(t, 0);
+	}
+
+	public void offer(T t, int priority){ //priority is currently unused: the priority-queue variant in QueueServer is commented out
+		ValueWrapper<T> v = new ValueWrapper<T>(t);
+		queue.offer(v);
+	}
+	
+	@Override
+	public void afterPropertiesSet() throws Exception {
+		queue = redissonClient.getBoundedBlockingQueue(getClass().getSimpleName().replaceAll("Server|Client",""));
+	}
+
+	public int currentSize() {
+		return queue.size();
+	}
+
+	public List<T> getQueue(){
+		return queue.readAll().stream().map(item -> item.getValue()).collect(Collectors.toList());
+	}
+
+}

+ 91 - 0
src/main/java/org/ccframe/commons/queue/QueueServer.java

@@ -0,0 +1,91 @@
+package org.ccframe.commons.queue;
+
+import java.util.Comparator;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.redisson.RedissonShutdownException;
+import org.redisson.api.RBoundedBlockingQueue;
+import org.redisson.api.RPriorityBlockingQueue;
+import org.redisson.api.RedissonClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.DisposableBean;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+
+import org.ccframe.commons.helper.ValueWrapper;
+
+public abstract class QueueServer<T> implements InitializingBean, DisposableBean{
+
+	abstract protected void onPoll(T t);
+
+    @Value("${app.init.qserver}")
+	private boolean qserver;
+	
+	@Autowired
+	private RedissonClient redissonClient;
+
+	private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
+
+	private static ExecutorService poolExecutor = Executors.newCachedThreadPool();
+	
+	private static int subscriptionCount = 0; //number of Redisson queue subscriptions, counted when running as a qserver
+	
+	public QueueServer() {
+		subscriptionCount += 1;
+	}
+	
+	public static int getSubscriptionCount() {
+		return subscriptionCount;
+	}
+
+//	private Comparator<ValueWrapper<T>> valueComparator = new Comparator<ValueWrapper<T>>() {
+//		@Override
+//		public int compare(ValueWrapper<T> v1, ValueWrapper<T> v2) {
+//			return v1.getPriority() - v2.getPriority();
+//		}
+//	};
+	
+	@Override
+	public void afterPropertiesSet() throws Exception {
+
+		if(!qserver) {
+			return;
+		}
+		
+		poolExecutor.execute(new Runnable() {
+			@Override
+			public void run() {
+				RBoundedBlockingQueue<ValueWrapper<T>> queue = redissonClient.getBoundedBlockingQueue(QueueServer.this.getClass().getSimpleName().replaceAll("Server|Client",""));
+//				RPriorityBlockingQueue<ValueWrapper<T>> queue = redissonClient.getPriorityBlockingQueue(QueueServer.this.getClass().getSimpleName().replaceAll("Server|Client",""));
+				logger.warn("current queue length: " + queue.size());
+//				queue.trySetComparator(valueComparator);
+				queue.trySetCapacity(1000000); //one-million-entry capacity
+
+				while(true) {
+					try {
+						ValueWrapper<T> t = queue.poll(10, TimeUnit.SECONDS);
+						if(t != null) {
+							QueueServer.this.onPoll(t.getValue());
+						}
+					}catch(Throwable tr) {
+						logger.error("queue poll failed", tr);
+						if(tr instanceof RedissonShutdownException){
+							break;
+						}
+					}
+				}
+			}
+		});
+	}
+
+	@Override
+	public void destroy() throws Exception {
+		if(!poolExecutor.isShutdown()) {
+			poolExecutor.shutdown();
+		}
+	}
+}
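
Matching client/server pair sketch, not part of the commit (hypothetical classes, in separate files). Both sides strip the Client/Server suffix from the simple class name, so SmsQueueClient and SmsQueueServer share the Redis queue named "SmsQueue".

import org.springframework.stereotype.Component;

@Component
public class SmsQueueClient extends QueueClient<String> {
	// producers call smsQueueClient.offer("13800000000");
}

@Component
public class SmsQueueServer extends QueueServer<String> {
	@Override
	protected void onPoll(String phoneNumber) {
		// consumes one queued item on the polling thread (only when app.init.qserver=true)
		System.out.println("sending sms to " + phoneNumber);
	}
}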

+ 99 - 0
src/main/java/org/ccframe/commons/servlet/CaptchaServlet.java

@@ -0,0 +1,99 @@
+package org.ccframe.commons.servlet;
+
+import java.io.File;
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.annotation.WebInitParam;
+import javax.servlet.annotation.WebServlet;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.commons.helper.ValidateCodeHelper;
+import org.ccframe.commons.util.VerifyCodeUtils;
+import org.ccframe.config.GlobalEx;
+import org.ccframe.subsys.core.service.PlatformSearchService;
+import org.redisson.api.RMapCache;
+import org.redisson.api.RedissonClient;
+
+/**
+ * Captcha image servlet; also emits startup log output.
+ * @author JIM
+ *
+ */
+@WebServlet(urlPatterns={"/captcha/captchaImg"},loadOnStartup=0,initParams=	{
+	@WebInitParam(name="avaiableChars",value="bcdefghkmnprtuvwxy34678"),
+	@WebInitParam(name="width",value="200"),
+	@WebInitParam(name="height",value="80")
+})
+public class CaptchaServlet extends HttpServlet {
+
+	private static final long serialVersionUID = -2178175559012074926L;
+	
+	private static final String PAGE_ID_NAME = "sid";
+	
+	private String avaiableChars = "23456789ABCDEFGHJKLMNPQRSTUVWXYZ";
+	private int width = 200;
+	private int height = 80;
+
+	private void initParameter(){
+		if(getServletConfig().getInitParameter("avaiableChars") != null){
+			avaiableChars = getServletConfig().getInitParameter("avaiableChars");		
+		}
+		if(getServletConfig().getInitParameter("width") != null){
+			width = Integer.parseInt(getServletConfig().getInitParameter("width"));		
+		}
+		if(getServletConfig().getInitParameter("height") != null){
+			height = Integer.parseInt(getServletConfig().getInitParameter("height"));		
+		}
+	}
+	
+	@Override
+	public void init() throws ServletException {
+		super.init();
+		initParameter();
+		VerifyCodeUtils.initFont(GlobalEx.APP_BASE_DIR + File.separator + GlobalEx.STATICS_DIR + File.separator + "Algerian.ttf");
+	}
+
+	public void doPost(HttpServletRequest request, HttpServletResponse response)
+            throws ServletException, IOException {
+        doGet(request, response);
+    }
+
+    public void doGet(HttpServletRequest request, HttpServletResponse response)
+            throws ServletException, IOException {
+    	PlatformSearchService platformSearchService = SpringContextHelper.getBean(PlatformSearchService.class);
+    	if(platformSearchService == null) {
+    		return;
+    	}
+
+		String currentServerName = request.getServerName();
+		Integer platformId = null;
+		if(request.getCookies() != null) {
+			for(Cookie cookie: request.getCookies()) {
+				if(cookie.getName().equals(GlobalEx.ADMIN_SITE)) { //the server name can be overridden via cookie
+					currentServerName = cookie.getValue();
+				}
+			}
+		}
+		platformId = platformSearchService.serverNameToPlatformId(currentServerName); //resolve even when no cookies are present, otherwise the code would never be stored
+
+        //prevent the page from being cached
+    	response.setHeader("Pragma", "No-cache");
+        response.setHeader("Cache-Control", "no-cache");
+        response.setDateHeader("Expires", 0);
+        response.setHeader("content-type", "image/jpeg");
+
+        String verifyCode = VerifyCodeUtils.generateVerifyCode(4, avaiableChars);
+        // store the verification code for later validation
+        if(platformId != null) {
+            SpringContextHelper.getBean(ValidateCodeHelper.class).putCode(request.getParameter(PAGE_ID_NAME) + GlobalEx.DEFAULT_TEXT_SPLIT_CHAR + platformId, verifyCode.toLowerCase()); //sid + platform ID key format reduces collisions
+        }
+        VerifyCodeUtils.outputImage(width, height, response.getOutputStream(), verifyCode);
+        response.getOutputStream().close();
+    }
+   
+}

+ 98 - 0
src/main/java/org/ccframe/commons/servlet/QrcodeServlet.java

@@ -0,0 +1,98 @@
+package org.ccframe.commons.servlet;
+
+import java.io.IOException;
+import java.util.Hashtable;
+import java.util.Map.Entry;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import com.google.zxing.BarcodeFormat;
+import com.google.zxing.EncodeHintType;
+import com.google.zxing.WriterException;
+import com.google.zxing.client.j2se.MatrixToImageConfig;
+import com.google.zxing.client.j2se.MatrixToImageWriter;
+import com.google.zxing.qrcode.QRCodeWriter;
+import com.google.zxing.qrcode.decoder.ErrorCorrectionLevel;
+
+/**
+ * QR code endpoint following the Liantu API spec: https://www.liantu.com/pingtai/
+ * 
+ * Currently implemented parameters:
+ * bg	background color, e.g. bg=ffffff; default white
+ * fg	foreground color, e.g. fg=cc0000; default black
+ * el	error-correction level, one of h\q\m\l, e.g. el=h; default q (quartile)
+ * m	quiet zone (outer margin) in pixels, e.g. m=30 
+ * w	image size in pixels, e.g. w=300
+ *
+ * These parameters can define the image style when referencing the QR image, e.g.:
+ *   http://qr.liantu.com/api.php?&bg=ffffff&fg=cc0000&text=x
+ * @author JIM
+ *
+ */
+public class QrcodeServlet extends HttpServlet {
+
+	private static final long serialVersionUID = -588005957999328630L;
+
+	private static final String DEFAULT_TEXT = "Jim@深圳市宏博智软科技";
+	
+	@Override
+	protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
+
+		String content = DEFAULT_TEXT;
+
+		Hashtable<EncodeHintType, Comparable<?>> hints = new Hashtable<>();
+        hints.put(EncodeHintType.CHARACTER_SET, "UTF-8"); // character encoding of the payload
+		int fg = MatrixToImageConfig.BLACK; 
+		int bg = MatrixToImageConfig.WHITE;
+		int margin = 0;	int size = 240;
+		for(Entry<String, String[]> entry: req.getParameterMap().entrySet()) {
+			if("bg".equals(entry.getKey())) {
+				bg = Integer.parseInt(entry.getValue()[0].toUpperCase(), 16) | 0xff000000;
+			}
+			if("fg".equals(entry.getKey())) {
+				fg = Integer.parseInt(entry.getValue()[0].toUpperCase(), 16) | 0xff000000;
+			}
+			if("el".equals(entry.getKey())) {
+				switch(entry.getValue()[0]) {
+					case "h":{
+						hints.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.H); //high (~30%)
+						break;
+					}
+					case "q":{
+						hints.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.Q); //quartile (~25%)
+						break;
+					}
+					case "m":{
+						hints.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.M); //medium (~15%)
+						break;
+					}
+					case "l":{
+						hints.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.L); //low (~7%)
+						break;
+					}
+				}
+			}
+			if("m".equals(entry.getKey())) {
+				margin = Integer.parseInt(entry.getValue()[0]);
+			}
+			if("text".equals(entry.getKey())) {
+				content = entry.getValue()[0];
+			}
+			if("w".equals(entry.getKey())) {
+				size = Integer.parseInt(entry.getValue()[0]);
+			}
+		}
+        hints.put(EncodeHintType.MARGIN, margin); // quiet zone margin
+        if (!hints.containsKey(EncodeHintType.ERROR_CORRECTION)) {
+            hints.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.Q); // default error-correction level Q; must not override an explicit el parameter
+        }
+		try {
+	        MatrixToImageWriter.writeToStream(new QRCodeWriter().encode(content, BarcodeFormat.QR_CODE, size, size, hints), "PNG", resp.getOutputStream(), new MatrixToImageConfig(fg, bg));
+		} catch (WriterException e) {
+			e.printStackTrace();
+		}		
+        resp.flushBuffer();
+	}
+
+}
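
Example request, not part of the commit (the URL mapping is hypothetical; the servlet's pattern is registered elsewhere):

GET /qrcode?text=https://example.com&w=300&m=10&el=h&fg=cc0000&bg=ffffff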

+ 21 - 0
src/main/java/org/ccframe/commons/sms/ISmsAdapter.java

@@ -0,0 +1,21 @@
+package org.ccframe.commons.sms;
+
+import org.ccframe.subsys.core.queue.SmsQueueData;
+
+public interface ISmsAdapter {
+
+	/**
+	 * Builds the message content to send, for recording to the database
+	 * @param smsParam
+	 * @return
+	 */
+	String buildSmsMessage(SmsQueueData smsParam);
+	
+	/**
+	 * Sends a single SMS message.
+	 * @param smsParam the message content to send.
+	 * @return receipt number
+	 * @throws SmsException carries the failure details and the receipt number (if it needs to be written back)
+	 */
+	String sendSms(SmsQueueData smsParam) throws SmsException;
+}
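
Adapter sketch, not part of the commit; the gateway call is stubbed and no SmsQueueData accessors beyond toString() are assumed:

import org.ccframe.subsys.core.queue.SmsQueueData;

public class StubSmsAdapter implements ISmsAdapter {

	@Override
	public String buildSmsMessage(SmsQueueData smsParam) {
		return "sms: " + smsParam; // rendered text to record in the database
	}

	@Override
	public String sendSms(SmsQueueData smsParam) throws SmsException {
		try {
			// call the real SMS gateway here and return its receipt number
			return "receipt-0001";
		} catch (RuntimeException e) {
			throw new SmsException(e.getMessage(), null); // no receipt to write back
		}
	}
}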

+ 28 - 0
src/main/java/org/ccframe/commons/sms/SmsException.java

@@ -0,0 +1,28 @@
+package org.ccframe.commons.sms;
+
+public class SmsException extends Exception{
+
+	private static final long serialVersionUID = 1661680494275596794L;
+
+	private String faildReason;
+	private String receiptNum;
+
+	public SmsException() {}
+	
+	public SmsException(String faildReason, String receiptNum) {
+		this.faildReason = faildReason;
+		this.receiptNum = receiptNum;
+	}
+	public String getFaildReason() {
+		return faildReason;
+	}
+	public void setFaildReason(String faildReason) {
+		this.faildReason = faildReason;
+	}
+	public String getReceiptNum() {
+		return receiptNum;
+	}
+	public void setReceiptNum(String receiptNum) {
+		this.receiptNum = receiptNum;
+	}
+}

+ 81 - 0
src/main/java/org/ccframe/commons/util/BigDecimalUtil.java

@@ -0,0 +1,81 @@
+package org.ccframe.commons.util;
+
+import java.math.BigDecimal;
+import java.math.MathContext;
+
+/**
+ * 
+ * Helper for numeric arithmetic, keeping 8 digits of precision.
+ * @author JIM
+ *
+ */
+public class BigDecimalUtil {
+    private BigDecimalUtil() {
+    }
+
+    private static final int MAX_PRECISION = 8;
+    
+    /**
+     * Multiplies the given numbers together (null values are skipped)
+     * @param values
+     * @return
+     */
+    public static Double multiply(Number... values) {
+        BigDecimal result = new BigDecimal(1);
+        for (Number v : values) {
+            if(v==null){
+                continue;
+            }
+            result = result.multiply(new BigDecimal(String.valueOf(v)), MathContext.DECIMAL64);
+        }
+        return result.setScale(MAX_PRECISION, BigDecimal.ROUND_HALF_UP).doubleValue();
+    }
+
+    /**
+     * Adds the given numbers together (null values are skipped)
+     * @param values
+     * @return
+     */
+    public static Double add(Number... values) {
+        BigDecimal result = BigDecimal.ZERO;
+        for (Number v : values) {
+            if(v==null){
+                continue;
+            }
+            result = result.add(new BigDecimal(String.valueOf(v)), MathContext.DECIMAL64);
+        }
+        return result.setScale(MAX_PRECISION, BigDecimal.ROUND_HALF_UP).doubleValue();
+    }
+
+    /**
+     * Subtracts end from start (start - end)
+     * @param start
+     * @param end
+     * @return
+     */
+    public static Double subtract(Number start, Number end) {
+        BigDecimal result = new BigDecimal(String.valueOf(start==null?0:start)).subtract(new BigDecimal(String.valueOf(end==null?0:end)));
+        return result.setScale(MAX_PRECISION, BigDecimal.ROUND_HALF_UP).doubleValue();
+    }
+
+    /**
+     * Divides start by end (start / end)
+     * @param start
+     * @param end
+     * @return
+     */
+    public static Double divide(Number start, Number end) {
+        BigDecimal result = new BigDecimal(String.valueOf(start==null?0:start)).divide(new BigDecimal(String.valueOf(end)), MathContext.DECIMAL64); //DECIMAL64, consistent with the other operations and the 8-decimal scale below
+        return result.setScale(MAX_PRECISION, BigDecimal.ROUND_HALF_UP).doubleValue();
+    }
+
+    /**
+     * Rounds half-up to the given number of decimal places
+     * @param decimalDigit
+     * @param value
+     * @return
+     */
+    public static Double roundHalfUp(int decimalDigit, Number value) {
+    	return new BigDecimal(String.valueOf(value == null ? 0: value)).setScale(decimalDigit, BigDecimal.ROUND_HALF_UP).doubleValue();
+    }
+
+}
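
Quick usage sketch, not part of the commit; the point of the String-based BigDecimal construction is exact decimal arithmetic, so 0.1 + 0.2 yields exactly 0.3 instead of the 0.30000000000000004 of plain double arithmetic:

Double sum = BigDecimalUtil.add(0.1, 0.2);                // 0.3
Double product = BigDecimalUtil.multiply(3, 0.37);        // 1.11
Double difference = BigDecimalUtil.subtract(1, 0.9);      // 0.1
Double rounded = BigDecimalUtil.roundHalfUp(2, 3.14159);  // 3.14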

+ 61 - 0
src/main/java/org/ccframe/commons/util/BrowserTimeCacheUtil.java

@@ -0,0 +1,61 @@
+package org.ccframe.commons.util;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Cache of article view counts, flushed to storage at fixed intervals.
+ * @author JIM
+ *
+ */
+public class BrowserTimeCacheUtil {
+
+	private BrowserTimeCacheUtil() {}
+	
+	private static final int CACHE_SIZE = 500; //cache view counts for up to 500 articles by default
+
+	private static Map<Integer, Integer> browserTimeCacheMap = Collections.synchronizedMap(new LinkedHashMap<Integer, Integer>(CACHE_SIZE, 0.75f, true){ //access-order, so eviction is least-recently-used as intended
+
+		private static final long serialVersionUID = 2082087443457277997L;
+
+		@Override
+		protected boolean removeEldestEntry(Map.Entry<Integer, Integer> eldest) {
+			// once the record count exceeds the configured maximum, drop the eldest entry (the least recently accessed one)
+			return size() > CACHE_SIZE;
+		}
+	});
+	
+	/**
+	 * Returns the cached view count of an article, for list rendering
+	 * @param articleInfId
+	 * @return
+	 */
+	public static Integer getBrowserTime(Integer articleInfId) {
+		return browserTimeCacheMap.get(articleInfId);
+	}
+	
+	/**
+	 * Bumps the cached view count when an article detail page is viewed
+	 * @param articleInfId
+	 * @param defaultBrowserTime
+	 * @return
+	 */
+	public static synchronized int updateBrowserTime(Integer articleInfId, Integer defaultBrowserTime) {
+		Integer browserTime = browserTimeCacheMap.get(articleInfId);
+		browserTime = (browserTime == null ? defaultBrowserTime: browserTime) + 1;
+		browserTimeCacheMap.put(articleInfId, browserTime);
+		return browserTime;
+	}
+
+	public static void resetBrowerTime(Integer articleInfId) {
+		browserTimeCacheMap.remove(articleInfId);
+	}
+	
+	public static List<Entry<Integer,Integer>> getBrowserTimeCache(){
+		return new ArrayList<Entry<Integer,Integer>>(browserTimeCacheMap.entrySet());
+	}
+}
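
Flush sketch, not part of the commit; articleService and its updateBrowseCount method are hypothetical, illustrating how a periodic job would persist the cached counts:

import java.util.Map.Entry;

public void flushBrowseCounts() {
	for (Entry<Integer, Integer> entry : BrowserTimeCacheUtil.getBrowserTimeCache()) {
		// entry key = articleInfId, value = cached view count
		articleService.updateBrowseCount(entry.getKey(), entry.getValue());
	}
}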

+ 159 - 0
src/main/java/org/ccframe/commons/util/BusinessException.java

@@ -0,0 +1,159 @@
+package org.ccframe.commons.util;
+
+import org.ccframe.config.ResGlobalEx;
+
+/**
+ * Wrapper for business-logic exceptions. Throw it when business processing fails, and let the globally defined BusinessExceptionHandler deal with it:
+ *  if (service.isContractDuplicated(contractNo))
+ *	  throw new BusinessException("bizErr.duplicatedContractNo",new String[]{contractNo});//depending on the message, up to 4 arguments may be passed
+ * Meaning of the common constructor arguments:
+ * code error code. Callers can branch on this string for different handling; it is also the i18n resource key
+ * args arguments. When the resource for code contains {number} placeholders, they are replaced by these values in order
+ * message custom exception message; what toString() prints
+ * useSimpleLog whether to log in simple form. For routine, non-diagnostic exceptions, only the message is logged to keep logs small
+ * viewData error object. With MVC page navigation, this data can be carried to the error page to show more information
+ *  
+ * @author Jim Wu 
+ * @since 1.0
+ */
+public class BusinessException extends RuntimeException{
+
+	/**
+	 * 
+	 */
+	private static final long serialVersionUID = -7970756494558100175L;
+
+    private String code;
+	private Object[] args = new Object[0];
+    private Object viewData; //to be presented in the exception error page,will be stored as request attribute with key of Constants.RESULT_KEY
+    /**
+    /**
+     * Decides whether full log output is needed; e.g. login failures do not require a detailed stacktrace
+     */
+
+    public Object getViewData() {
+        return viewData;
+    }
+
+    public void setViewData(Object viewData) {
+        this.viewData = viewData;
+    }
+
+    @Override
+	public boolean equals(Object obj) {
+    	if(obj == null){
+    		return false;
+    	}
+    	if(!(obj instanceof BusinessException)){
+    		return false;
+    	}
+    	BusinessException targetBusinessException = (BusinessException)obj;
+    	if(!targetBusinessException.getCode().equals(this.getCode())){
+    		return false;
+    	}
+    	Object[] targetArgs = targetBusinessException.getArgs();
+    	if(getArgs().length != targetArgs.length){
+    		return false;
+    	}
+    	if(getArgs().length == 0){
+    		return true;
+    	}
+    	for(int i = 0; i < getArgs().length; i ++){
+    		if(!getArgs()[i].equals(targetArgs[i])){
+    			return false;
+    		}
+    	}
+    	return true;
+	}
+
+	@Override
+	public int hashCode() { //NOSONAR
+		return super.hashCode(); //NOSONAR
+	}
+
+	public String getCode(){
+		return code;
+	}
+
+	public Object[] getArgs(){
+		return args;
+	}
+
+	public BusinessException(String code, boolean useSimpleLog){
+		this.code = code;
+		this.useSimpleLog = useSimpleLog;
+	}
+
+	public BusinessException(String code){
+		this.code = code;
+	}
+
+	public BusinessException(String code,Object[] args){
+		this(code, args, true);
+	}
+	
+	public BusinessException(String code,Object[] args, boolean useSimpleLog){
+		this.code = code ;
+		this.args = args;
+		this.useSimpleLog = useSimpleLog;
+	}
+
+	public BusinessException(String code,Object[] args, String message, boolean useSimpleLog){
+		super(message);
+		this.code = code;
+		this.args = args;
+		this.useSimpleLog = useSimpleLog;
+	}
+
+	public BusinessException(String code,Throwable cause){
+		super(cause);
+		this.code = code;
+	}
+
+	public BusinessException(String code,Object[] args,String message,Throwable cause){
+		super(message,cause);
+		this.code = code;
+		this.args = args;
+	}
+
+    public BusinessException(String code,Object viewData){
+		this.code = code;
+		this.viewData = viewData;
+	}
+
+	public BusinessException(String code,Object[] args,Object viewData){
+		this.code = code ;
+		this.args = args;
+        this.viewData = viewData;
+	}
+
+	public BusinessException(String code,Object[] args,String message,Object viewData){
+		super(message);
+		this.code = code;
+		this.args = args;
+        this.viewData = viewData;
+	}
+
+	public BusinessException(String code,Throwable cause,Object viewData){
+		super(cause);
+		this.code = code;
+        this.viewData = viewData;
+	}
+
+	public BusinessException(String code,Object[] args,String message,Throwable cause,Object viewData){
+		super(message,cause);
+		this.code = code;
+		this.args = args;
+        this.viewData = viewData;
+	}
+
+	public boolean isUseSimpleLog() {
+		return useSimpleLog;
+	}
+
+	public void setUseSimpleLog(boolean useSimpleLog) {
+		this.useSimpleLog = useSimpleLog;
+	}
+	
+	public static final BusinessException NOT_ACCESSABLE = new BusinessException(ResGlobalEx.ERRORS_USER_DEFINED, new String[]{"您无权进行此操作"});
+}

+ 162 - 0
src/main/java/org/ccframe/commons/util/CcTransactionUtil.java

@@ -0,0 +1,162 @@
+package org.ccframe.commons.util;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.lang3.StringUtils;
+import org.ccframe.commons.base.BaseEntity;
+import org.ccframe.commons.base.BaseSearchService;
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.config.GlobalEx;
+import org.springframework.data.elasticsearch.annotations.Document;
+
+import lombok.AllArgsConstructor;
+import lombok.Getter;
+import lombok.Setter;
+import lombok.extern.slf4j.Slf4j;
+import net.oschina.j2cache.CacheChannel;
+
+/**
+ * Transaction helper. When an entity is persisted, the required operation is queued;
+ * when the transaction commits, all queued operations are flushed in batch to ES and the cache.
+ * @author Administrator
+ *
+ */
+@SuppressWarnings("rawtypes")
+@Slf4j
+public class CcTransactionUtil {
+
+	private static final int BATCH_LOG_THRESHOLD = 20;
+	
+	/**
+	 * Per-thread operation queue
+	 */
+	private static ThreadLocal<List<Operation>> operationListLocal = new ThreadLocal<List<Operation>>();
+	
+	/**
+	/**
+	 * Begins a transaction
+	 */
+		operationListLocal.set(new ArrayList<Operation>());
+	}
+	
+	/**
+	 * Queues an entity operation in the current transaction
+	 */
+	private static void innerPushOperation(Action action, BaseEntity data){
+		if(data.getClass().getAnnotation(Document.class) == null) { //entities without an ES mapping are ignored
+			return;
+		}
+		String beanName = StringUtils.uncapitalize(data.getClass().getSimpleName()) + GlobalEx.SEARCH_SERVICE_CLASS_SUFFIX; //locate the matching xxxSearchService by naming convention
+		if(!SpringContextHelper.containsBean(beanName)){ //entities with an ES mapping but no search service do not participate either
+			return;
+		}
+		Object bean = SpringContextHelper.getBean(beanName);
+		CacheChannel cacheChannel = SpringContextHelper.getBean(CacheChannel.class);
+		if(bean instanceof BaseSearchService){
+			operationListLocal.get().add(new Operation(cacheChannel, action, data, (BaseSearchService)bean)); //append to the queue; the entity is a shallow-clone snapshot taken at save time
+		}
+	}
+	
+	public static void pushSave(BaseEntity data){
+		innerPushOperation(Action.SAVE, data);
+	}
+	
+	public static void pushDelete(BaseEntity data){
+		innerPushOperation(Action.DELETE, data);
+	}
+
+	public static enum Action{
+		SAVE, DELETE 
+	}
+
+	/**
+	 * Also call this method when a flush is needed
+	 */
+	@SuppressWarnings("unchecked")
+	public static void commit(){
+		List<Operation> operationList = operationListLocal.get();
+		//group operations for batch processing
+		Map<BaseSearchService, List<BaseEntity>> saveDataMap = new HashMap<>();
+		Map<BaseSearchService, List<BaseEntity>> deleteDataMap = new HashMap<>();
+		
+		CacheChannel cacheChannel = null;
+		for(Operation operation: operationList){ //group by target search service
+			Map<BaseSearchService, List<BaseEntity>> opDataMap = (operation.getAction() == Action.SAVE ? saveDataMap : deleteDataMap);
+			if(cacheChannel == null) {
+				cacheChannel = operation.getCacheChannel();
+			}
+
+			List<BaseEntity> opList = opDataMap.get(operation.getSearchService());
+			if(opList == null) {
+				opList = new ArrayList<BaseEntity>();
+				opDataMap.put(operation.getSearchService(), opList);
+			}
+			opList.add(operation.getData());
+		}
+		
+		for(Entry<BaseSearchService, List<BaseEntity>> entryList: saveDataMap.entrySet()) { //apply saves first
+			entryList.getKey().saveBatch(entryList.getValue());
+			String entityName = null;
+			for(BaseEntity baseEntity: entryList.getValue()) {
+				if(entityName == null) {
+					entityName = baseEntity.getClass().getSimpleName();
+				}
+				cacheChannel.set(GlobalEx.CACHEREGION_ENTITY, entityName + "-" + baseEntity.getId(), baseEntity); //cache key format <ClassName>-<id>
+			}
+			if(entryList.getValue().size() > BATCH_LOG_THRESHOLD) {
+				log.info(entityName + " batch indexed " + entryList.getValue().size() + " documents");
+			}
+		}
+
+		for(Entry<BaseSearchService, List<BaseEntity>> entryList: deleteDataMap.entrySet()) { //then apply deletes
+			entryList.getKey().deleteBatch(entryList.getValue());
+			String entityName = null;
+			for(BaseEntity baseEntity: entryList.getValue()) {
+				if(entityName == null) {
+					entityName = baseEntity.getClass().getSimpleName();
+				}
+				cacheChannel.evict(GlobalEx.CACHEREGION_ENTITY, entityName + "-" + baseEntity.getId()); //deleted entities must be evicted, not re-cached; cache key format <ClassName>-<id>
+			}
+			if(entryList.getValue().size() > BATCH_LOG_THRESHOLD) {
+				log.info(entityName + " batch deleted " + entryList.getValue().size() + " documents");
+			}
+		}
+
+		operationList.clear();
+	}
+	
+	public static void rollback(){
+		for(Operation operation: operationListLocal.get()) { //on failure, evict every cache entry we touched, no questions asked, to avoid serving stale data
+			try {
+				operation.getCacheChannel().evict(GlobalEx.CACHEREGION_ENTITY, operation.getData().getClass().getSimpleName() + "-" + operation.getData().getId().toString());
+			}catch(Exception e) {
+				e.printStackTrace();
+			}
+		}
+		operationListLocal.get().clear();
+	}
+	
+	/**
+	 * An index operation entry.
+	 * @author JIM
+	 */
+
+	@Getter
+	@Setter
+	@AllArgsConstructor
+	private static class Operation{
+
+		private CacheChannel cacheChannel;
+
+		private Action action;
+		
+		private BaseEntity data;
+		
+		private BaseSearchService searchService;
+
+	}
+}
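
Producer-side sketch, not part of the commit (User and userRepository are hypothetical): a write service queues the index/cache work at persist time, and CcframeTransactionManager later drives commit() or rollback() when the surrounding JPA transaction finishes.

public User save(User user) {
	User saved = userRepository.save(user); // JPA write inside the transaction
	CcTransactionUtil.pushSave(saved);      // queued; applied only on commit
	return saved;
}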

+ 107 - 0
src/main/java/org/ccframe/commons/util/DESUtil.java

@@ -0,0 +1,107 @@
+package org.ccframe.commons.util;
+
+import java.security.Key;
+import java.util.Base64;
+
+import javax.crypto.Cipher;
+import javax.crypto.SecretKeyFactory;
+import javax.crypto.spec.DESKeySpec;
+import javax.crypto.spec.SecretKeySpec;
+
+public class DESUtil {
+	 
+    /**
+     * Key algorithm
+     */
+    private static final String ALGORITHM = "DES";
+    /**
+     * Cipher transformation: algorithm/mode/padding
+     */
+    private static final String CIPHER_ALGORITHM = "DES/ECB/PKCS5Padding";
+    /**
+     * Default charset
+     */
+    private static final String CHARSET = "utf-8";
+ 
+    /**
+     * Generates a key
+     *
+     * @param password
+     * @return
+     * @throws Exception
+     */
+    private static Key generateKey(String password) throws Exception {
+        DESKeySpec dks = new DESKeySpec(password.getBytes(CHARSET));
+        SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(ALGORITHM);
+        return keyFactory.generateSecret(dks);
+    }
+ 
+ 
+    /**
+     * Encrypts a string with DES
+     *
+     * @param password encryption password, at least 8 characters
+     * @param data string to encrypt
+     * @return encrypted content (Base64)
+     */
+    public static String encrypt(String password, String data) {
+        if (password== null || password.length() < 8) {
+            throw new RuntimeException("encryption failed: key must be at least 8 characters");
+        }
+        if (data == null)
+            return null;
+        try {
+        	Cipher cipher = Cipher.getInstance(CIPHER_ALGORITHM);  
+            cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(getKey(password), ALGORITHM));  
+            byte[] encryptedData = cipher.doFinal(data.getBytes(CHARSET));  
+            return Base64.getEncoder().encodeToString(encryptedData);
+        } catch (Exception e) {
+            e.printStackTrace();
+            return data;
+        }
+    }
+ 
+    /** 
+     * Pads the key to 8 bytes when it is shorter
+     * @param keyRule  
+     */  
+    public static byte[] getKey(String keyRule) {  
+        Key key = null;  
+        byte[] keyByte = keyRule.getBytes();  
+        // create an empty 8-byte array (zero-filled by default)  
+        byte[] byteTemp = new byte[8];  
+        // copy the user-specified rule into the 8-byte array  
+        for (int i = 0; i < byteTemp.length && i < keyByte.length; i++) {  
+            byteTemp[i] = keyByte[i];  
+        }  
+        key = new SecretKeySpec(byteTemp, ALGORITHM); //the second argument is the algorithm name, not a charset  
+        return key.getEncoded();  
+    }
+
+    /**
+     * Decrypts a DES-encrypted string
+     *
+     * @param password decryption password, at least 8 characters
+     * @param data string to decrypt
+     * @return decrypted content
+     */
+    public static String decrypt(String password, String data) {
+        if (password== null || password.length() < 8) {
+            throw new RuntimeException("decryption failed: key must be at least 8 characters");
+        }
+        if (data == null)
+            return null;
+        try {
+			byte[] sourceBytes = Base64.getDecoder().decode(data);    
+			Cipher cipher = Cipher.getInstance(CIPHER_ALGORITHM);   
+			cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(getKey(password), ALGORITHM));    
+			byte[] decoded = cipher.doFinal(sourceBytes);    
+			return new String(decoded, "UTF-8");  
+		} catch (Exception e) {
+			e.printStackTrace();
+			return data;
+		}
+
+    }
+
+}
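
Round-trip sketch, not part of the commit. Note that single DES in ECB mode is long obsolete and should not protect sensitive data; prefer AES/GCM in new code.

String secret = DESUtil.encrypt("12345678", "hello world"); // Base64 ciphertext
String plain = DESUtil.decrypt("12345678", secret);         // "hello world"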

+ 170 - 0
src/main/java/org/ccframe/commons/util/DbUnitUtils.java

@@ -0,0 +1,170 @@
+package org.ccframe.commons.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.lang3.StringUtils;
+import org.ccframe.commons.dbunit.DatabaseOperationEx;
+import org.ccframe.commons.dbunit.InitFileReplacementProcessor;
+import org.ccframe.commons.dbunit.RandomReplacementProcessor;
+import org.ccframe.commons.dbunit.ReplacementDataSet;
+import org.ccframe.commons.dbunit.ReplacementProcessor;
+import org.ccframe.config.GlobalEx;
+import org.dbunit.DatabaseUnitException;
+import org.dbunit.database.DatabaseConfig;
+import org.dbunit.database.DatabaseDataSourceConnection;
+import org.dbunit.database.DatabaseSequenceFilter;
+import org.dbunit.database.IDatabaseConnection;
+import org.dbunit.dataset.FilteredDataSet;
+import org.dbunit.dataset.IDataSet;
+import org.dbunit.dataset.xml.FlatXmlDataSet;
+import org.dbunit.dataset.xml.FlatXmlDataSetBuilder;
+import org.dbunit.ext.db2.Db2DataTypeFactory;
+import org.dbunit.ext.h2.H2DataTypeFactory;
+import org.dbunit.ext.hsqldb.HsqldbDataTypeFactory;
+import org.dbunit.ext.mssql.MsSqlDataTypeFactory;
+import org.dbunit.ext.mysql.MySqlDataTypeFactory;
+import org.dbunit.ext.oracle.Oracle10DataTypeFactory;
+import org.dbunit.ext.oracle.OracleDataTypeFactory;
+import org.dbunit.ext.postgresql.PostgresqlDataTypeFactory;
+import org.dbunit.operation.DatabaseOperation;
+import org.dbunit.operation.MsSqlInsertOperation;
+import org.dbunit.operation.TransactionOperation;
+
+public class DbUnitUtils {
+
+	private static final String REPLACEMENT_START_DELIMITER = "##{";
+	private static final String REPLACEMENT_END_DELIMITER = "}##";
+	
+//	private static final String SEQUENCE_START_DELIMITER = "$#{";
+//	private static final String SEQUENCE_END_DELIMITER = "}#$";
+
+	private DbUnitUtils(){}
+	
+	public static enum DBTYPE{
+		MYSQL, H2, MSSQL, ORACLE, ORACLE10, DB2, HSQLDB, PSQL;
+
+		public static DBTYPE fromCode(String code) {
+			try {
+				return values()[Integer.parseInt(code)];
+			} catch (Exception e) {
+				return null;
+			}
+		}
+
+		public String toCode() {
+			return Integer.toString(this.ordinal());
+		}
+	}
+
+	private static void filterDeleteData(FlatXmlDataSet xmlDataSet){
+		
+	}
+	
+	private static void loadDbUnitData(DatabaseOperation dbOperation, DataSource dataSource, String schema, DBTYPE dbType, Map<String, String> replacementToken, String... xmlPaths) throws DatabaseUnitException, SQLException { //NOSONAR
+		IDatabaseConnection connection = (StringUtils.isNotBlank(schema) ? new DatabaseDataSourceConnection(dataSource, schema) : new DatabaseDataSourceConnection(dataSource));
+		connection.getConfig().setProperty(DatabaseConfig.FEATURE_ALLOW_EMPTY_FIELDS, true); //allow empty "" values; note this causes mysql/oracle compatibility errors (oracle has no empty string '', only null)
+		try{
+			for (String xmlPath : xmlPaths) {
+				try(InputStream input = new FileInputStream(GlobalEx.APP_BASE_DIR + File.separator + xmlPath);) {
+					FlatXmlDataSet xmlDataSet = new FlatXmlDataSetBuilder().setColumnSensing(false).build(input);
+					filterDeleteData(xmlDataSet);
+					org.dbunit.dataset.ReplacementDataSet replacementDataSet = new org.dbunit.dataset.ReplacementDataSet(xmlDataSet);
+					replacementDataSet.setSubstringDelimiters(REPLACEMENT_START_DELIMITER, REPLACEMENT_END_DELIMITER);
+					replacementDataSet.addReplacementObject("[null]", null);
+					for(Entry<String, String> replacementEntry :replacementToken.entrySet()){
+						replacementDataSet.addReplacementSubstring(replacementEntry.getKey(), replacementEntry.getValue());
+					}
+//					DBSequenceReplacementDataSet dbSequenceReplacementDataSet = new DBSequenceReplacementDataSet(replacementDataSet, connection, dbType);
+//					dbSequenceReplacementDataSet.setSubstringDelimiters(SEQUENCE_START_DELIMITER, SEQUENCE_END_DELIMITER);
+					
+					List<ReplacementProcessor> replacementProcessorList = new ArrayList<ReplacementProcessor>();
+					replacementProcessorList.add(new RandomReplacementProcessor());
+					replacementProcessorList.add(new InitFileReplacementProcessor());
+					IDataSet executeDataSet = new FilteredDataSet(
+						new DatabaseSequenceFilter(connection),
+//						dbSequenceReplacementDataSet
+						new ReplacementDataSet(replacementDataSet, connection, dbType, replacementProcessorList)
+					);
+					switch(dbType){
+					case MYSQL:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new MySqlDataTypeFactory());
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_ESCAPE_PATTERN, "`?`"); //lets MySQL columns named after reserved words work
+						break;
+					case H2:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new H2DataTypeFactory());
+						break;
+					case MSSQL:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new MsSqlDataTypeFactory());
+						break;
+					case ORACLE:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new OracleDataTypeFactory());
+						break;
+					case ORACLE10:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new Oracle10DataTypeFactory());
+						break;
+					case DB2:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new Db2DataTypeFactory());
+						break;
+					case HSQLDB:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new HsqldbDataTypeFactory());
+						break;
+					case PSQL:
+						connection.getConfig().setProperty(DatabaseConfig.PROPERTY_DATATYPE_FACTORY, new PostgresqlDataTypeFactory());
+						break;
+					default:
+						return;
+					}
+					if(dbOperation.equals(DatabaseOperationEx.DELETE_ALL) || dbOperation.equals(DatabaseOperationEx.DELETE_WHERE)){
+						dbOperation.execute(connection, executeDataSet);
+					}else{
+						new TransactionOperation(dbOperation).execute(connection, executeDataSet);
+					}
+				}catch(IOException e) {
+					throw new RuntimeException(e);
+				}
+			}
+		}finally{
+			connection.close();
+		}
+	}
+
+	/**
+	 * Truncate the tables defined in the xml, then insert data.
+	 */
+	public static void loadDbUnitData(DataSource dataSource, String schema, DBTYPE dbType, Map<String, String> replacementToken, String... xmlPaths) throws DatabaseUnitException, SQLException {
+		if(dbType == DBTYPE.MSSQL) {
+			loadDbUnitData(MsSqlInsertOperation.CLEAN_INSERT, dataSource, schema, dbType, replacementToken, xmlPaths);
+		}else {
+			loadDbUnitData(DatabaseOperationEx.CLEAN_INSERT, dataSource, schema, dbType, replacementToken, xmlPaths);
+		}
+	}
+
+	public static void removeDbUnitData(DataSource dataSource, String schema, DBTYPE dbType, Map<String, String> replacementToken, String... xmlPaths) throws DatabaseUnitException, SQLException {
+		loadDbUnitData(DatabaseOperationEx.DELETE_ALL, dataSource, schema, dbType, replacementToken, xmlPaths);
+	}
+
+	public static void deleteDbUnitData(DataSource dataSource, String schema, DBTYPE dbType, Map<String, String> replacementToken, String... xmlPaths) throws DatabaseUnitException, SQLException {
+		loadDbUnitData(DatabaseOperationEx.DELETE_WHERE, dataSource, schema, dbType, replacementToken, xmlPaths);
+	}
+
+	/**
+	 * Only insert data.
+	 */
+	public static void appendDbUnitData(DataSource dataSource, String schema, DBTYPE dbType, Map<String, String> replacementToken, String... xmlPaths) throws DatabaseUnitException, SQLException {
+		if(dbType == DBTYPE.MSSQL) {
+			loadDbUnitData(MsSqlInsertOperation.INSERT, dataSource, schema, dbType, replacementToken, xmlPaths);
+		}else {
+			loadDbUnitData(DatabaseOperationEx.INSERT, dataSource, schema, dbType, replacementToken, xmlPaths);
+		}
+	}
+}
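
Fixture-loading sketch, not part of the commit; the dataSource and XML path are illustrative, and the call declares DatabaseUnitException/SQLException. Placeholders wrapped in ##{...}## inside the XML are substituted from the replacement map before insert.

Map<String, String> tokens = new HashMap<>();
tokens.put("TODAY", "2023-01-01"); // replaces ##{TODAY}## in the fixture
DbUnitUtils.loadDbUnitData(dataSource, null, DbUnitUtils.DBTYPE.MYSQL, tokens,
		"WEB-INF/dbunit/core-data.xml");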

+ 53 - 0
src/main/java/org/ccframe/commons/util/ESPathUtil.java

@@ -0,0 +1,53 @@
+package org.ccframe.commons.util;
+
+import java.io.File;
+import java.io.IOException;
+//import java.util.regex.Pattern;
+
+import org.apache.commons.io.FileUtils;
+
+import org.ccframe.config.GlobalEx;
+
+/**
+ * Utility for locating and cleaning the embedded ES directories.
+ * 
+ * @author JIM
+ *
+ */
+public class ESPathUtil {
+	
+	private static final String ES_HOME_DIR = "eshome"; 
+	
+	private static final String ES_DATA_DIR = "esdata"; 
+
+	private static final String ES_DATA_NODES_DIR = "nodes"; 
+
+	private static final String ES_DATA_NODES_DEL_DIR = "nodes_del"; 
+
+//	private static final String DEFAULT_CLUSTER_NAME = "ayhshop";
+	
+	public static String getHomeDir() {
+		return GlobalEx.APP_BASE_DIR + File.separator + "WEB-INF" + File.separator + ES_HOME_DIR;
+	}
+	
+	public static String getDataDir() {
+		return getHomeDir() + File.separator + ES_DATA_DIR;
+	}
+
+	public static String cleanUpCluster(String clusterName) {
+		File dataDir = new File(getDataDir() + File.separator + ES_DATA_NODES_DIR);
+		if(dataDir.exists() && dataDir.isDirectory()) {
+			File deleteDir = new File(getDataDir() + File.separator + ES_DATA_NODES_DEL_DIR);
+			try {
+				if(deleteDir.exists()) {
+					FileUtils.deleteDirectory(deleteDir);
+				}
+				FileUtils.moveDirectory(dataDir, deleteDir);
+				FileUtils.deleteDirectory(deleteDir);
+			} catch (IOException e) {
+				e.printStackTrace();
+			}
+		}
+		return clusterName;
+	}
+}

+ 76 - 0
src/main/java/org/ccframe/commons/util/EnumFromCodeUtil.java

@@ -0,0 +1,76 @@
+package org.ccframe.commons.util;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.springframework.core.io.Resource;
+import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
+import org.springframework.core.io.support.ResourcePatternResolver;
+
+import org.ccframe.config.GlobalEx;
+
+/**
+ * Enum-code support for JSTL. With these helper methods, JSP/JSTL pages can branch on
+ * ENUM names instead of raw database codes, for example:
+ * <c:if test="${sdk:enumFromCode('ApproveStatCodeEnum', articleInfProxy.approveStatCode) == 'NOT_SUBMIT'}">not submitted</c:if>
+ * <c:if test="${sdk:enumFromCode('ApproveStatCodeEnum', articleInfProxy.approveStatCode) == 'QUEUE'}">pending</c:if>
+ * <c:if test="${sdk:enumFromCode('ApproveStatCodeEnum', articleInfProxy.approveStatCode) == 'APPROVE'}">approved</c:if>
+ * <c:if test="${sdk:enumFromCode('ApproveStatCodeEnum', articleInfProxy.approveStatCode) == 'DENY'}">denied</c:if>
+ * @author JIM
+ *
+ */
+public class EnumFromCodeUtil {
+
+	private static final String CORE_BUSINESS_PACKS[] = {"core", "article"};
+	
+	private static Map<String, Class<Enum<?>>> enumClassMap;
+	
+	public static synchronized void initEnumMap(){
+		try{
+			List<String> coreBusinessPackList = Arrays.asList(CORE_BUSINESS_PACKS);
+			if(enumClassMap == null){
+				enumClassMap = new HashMap<String, Class<Enum<?>>>();
+				ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
+		    	for(String coreBusinessPack: coreBusinessPackList) { //read the framework jar paths; once packaged into a jar the full path must be used, hence the explicit array
+		    		addFiles(resolver, "classpath*:" + GlobalEx.MAIN_PACKAGE.replace('.','/') + "/subsys/"+coreBusinessPack+"/domain/code/*.class");
+		    	}
+				for(Resource dirResource: resolver.getResources("classpath:" + GlobalEx.MAIN_PACKAGE.replace('.','/') + "/subsys/*")){ //scan the classes directory; packages already read are not read again
+					if(!coreBusinessPackList.contains(dirResource.getFile().getName())) {
+			    		addFiles(resolver, "classpath*:" + GlobalEx.MAIN_PACKAGE.replace('.','/') + "/subsys/"+dirResource.getFile().getName()+"/domain/code/*.class");
+					}
+		    	}
+			}
+		}catch(Exception e){
+			e.printStackTrace();
+		}
+	}
+
+	@SuppressWarnings("rawtypes")
+	private static void addFiles(ResourcePatternResolver resolver, String locationPattern) throws ClassNotFoundException {
+		try {
+			for(Resource classResource: resolver.getResources(locationPattern)){
+				String uriStr = classResource.getURI().toString();
+				Class scanClass = Class.forName(GlobalEx.MAIN_PACKAGE + "." + uriStr.substring(uriStr.indexOf("subsys"),uriStr.lastIndexOf(".class")).replace('/', '.'));
+				enumClassMap.put(scanClass.getSimpleName(), scanClass);
+			}
+		}catch(IOException e) {
+			return;
+		}
+	}
+	
+	public static Enum<?> enumFromCode(String enumClassName, String code){
+		try {
+			initEnumMap();
+			return (Enum<?>)enumClassMap.get(enumClassName).getMethod("fromCode", String.class).invoke(null, code);
+		} catch (Exception e) {
+			e.printStackTrace();
+		}
+		return null;
+	}
+	
+	public static Map<String, Class<Enum<?>>> getEnumClassMap(){
+		return enumClassMap;
+	}
+}

+ 66 - 0
src/main/java/org/ccframe/commons/util/FlatXmlWriterEx.java

@@ -0,0 +1,66 @@
+package org.ccframe.commons.util;
+
+import org.dbunit.dataset.DataSetException;
+import org.dbunit.dataset.IDataSet;
+import org.dbunit.dataset.ITableMetaData;
+import org.dbunit.dataset.xml.FlatXmlWriter;
+
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.io.Writer;
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class FlatXmlWriterEx extends FlatXmlWriter {
+
+    /**
+     * Logger for this class
+     */
+//    private static final Logger logger = LoggerFactory.getLogger(FlatXmlWriterEx.class);
+
+	private int totalTableCount;
+	private int processedTableCount = 0;
+	private String processingTableName;
+	private int currentTableTotalCount;
+	private Map<String, Integer> tableRowCountMap = new HashMap<String, Integer>(0);
+	
+	public FlatXmlWriterEx(OutputStream outputStream, String encoding) throws UnsupportedEncodingException {
+		super(outputStream, encoding);
+	}
+	public FlatXmlWriterEx(Writer writer) {
+		super(writer);
+	}
+	public FlatXmlWriterEx(Writer writer, String encoding) {
+		super(writer, encoding);
+	}
+
+	@Override
+	public void startTable(ITableMetaData metaData) throws DataSetException {
+		processedTableCount++;
+		// guard against an NPE when startTable() is invoked without a prior write()
+		Integer rowCount = tableRowCountMap.get(metaData.getTableName());
+		currentTableTotalCount = (rowCount != null) ? rowCount : 0;
+		processingTableName = metaData.getTableName();
+		super.startTable(metaData);
+	}
+	@Override
+	public void write(IDataSet dataSet) throws DataSetException {
+		for(String tableName: dataSet.getTableNames()){
+			tableRowCountMap.put(tableName, 0);
+		}
+		totalTableCount = dataSet.getTableNames().length;
+		super.write(dataSet);
+	}
+
+	public int getTotalTableCount() {
+		return totalTableCount;
+	}
+	public int getProcessedTableCount() {
+		return processedTableCount;
+	}
+	public int getCurrentTableTotalCount() {
+		return currentTableTotalCount;
+	}
+	public String getProcessingTableName() {
+		return processingTableName;
+	}
+}
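
A sketch of how the progress getters might be used during an export (the DbUnit connection and output stream are assumed to exist elsewhere; the write call itself is standard DbUnit):

    // assumed: an open org.dbunit.database.IDatabaseConnection and a target OutputStream
    IDataSet dataSet = connection.createDataSet();
    FlatXmlWriterEx writer = new FlatXmlWriterEx(out, "UTF-8");
    // another thread can poll getProcessedTableCount()/getTotalTableCount() and
    // getProcessingTableName() to report progress while this call runs
    writer.write(dataSet);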

+ 108 - 0
src/main/java/org/ccframe/commons/util/JsonBinder.java

@@ -0,0 +1,108 @@
+package org.ccframe.commons.util;
+
+import java.io.IOException;
+
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.NullNode;
+
+/**
+ * JSON converter (Jackson).
+ * 
+ * Jim
+ */
+public class JsonBinder {
+    private static Logger logger = LoggerFactory.getLogger(JsonBinder.class);
+
+    private ObjectMapper mapper;
+    private static JsonBinder normalBinder;
+    private static JsonBinder nonNullBinder;
+
+    public JsonNode toNode(String jsonString){
+    	try {
+			return mapper.readTree(jsonString);
+		} catch (IOException e) {
+			logger.error("json parse error:" + jsonString,e);
+			return NullNode.getInstance();
+		}
+    }
+
+    public JsonNode objectToNode(Object object){
+    	return toNode(toJson(object));
+    }
+
+    private JsonBinder(Include inclusion) {
+        mapper = new ObjectMapper();
+        // controls which properties are included in serialized output
+        mapper.setSerializationInclusion(inclusion);
+        // ignore properties present in the JSON string but missing from the Java object;
+        // getDeserializationConfig().without(...) returns a new config without changing
+        // the mapper, so disable() is used instead
+        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
+    }
+
+    /**
+     * Build a binder that serializes all properties to the JSON string.
+     */
+    public static synchronized JsonBinder buildNormalBinder() {
+        if(normalBinder==null){
+            normalBinder = new JsonBinder(Include.ALWAYS);
+        }
+        return normalBinder;
+    }
+
+    /**
+     * Build a binder that serializes only non-null properties to the JSON string.
+     */
+    public static synchronized JsonBinder buildNonNullBinder() {
+        if(nonNullBinder==null){
+            nonNullBinder = new JsonBinder(Include.NON_NULL);
+        }
+        return nonNullBinder;
+    }
+
+    /**
+     * Returns null if the JSON string is null or the literal "null".
+     * Returns an empty collection for "[]".
+     * To get an array back, pass an array type such as Product[].class.
+     */
+    public <T> T toBean(String jsonString, Class<T> clazz) {
+        if (StringUtils.isEmpty(jsonString)) {
+            return null;
+        }
+        try {
+            return mapper.readValue(jsonString, clazz);
+        } catch (IOException e) {
+            logger.warn("parse json string error:" + jsonString, e);
+            return null;
+        }
+    }
+
+    /**
+     * Returns "null" for a null object and "[]" for an empty collection.
+     */
+    public String toJson(Object object) {
+
+        try {
+            return mapper.writeValueAsString(object);
+        } catch (IOException e) {
+            logger.warn("write to json string error:" + object, e);
+            return null;
+        }
+    }
+
+    /**
+     * Expose the underlying mapper for further configuration or other serialization APIs.
+     */
+    public ObjectMapper getMapper() {
+        return mapper;
+    }
+
+}
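
A round-trip sketch (User is a hypothetical POJO; imports follow the class above):

    User user = new User();
    // the NON_NULL binder drops null properties from the output
    String json = JsonBinder.buildNonNullBinder().toJson(user);
    User restored = JsonBinder.buildNormalBinder().toBean(json, User.class);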

+ 132 - 0
src/main/java/org/ccframe/commons/util/JsonUtil.java

@@ -0,0 +1,132 @@
+package org.ccframe.commons.util;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.NullNode;
+
+/**
+ * JSON converter (Jackson).
+ * 
+ * Jim
+ */
+public class JsonUtil {
+    private static Logger logger = LoggerFactory.getLogger(JsonUtil.class);
+
+    private ObjectMapper mapper;
+    private static JsonUtil normalBinder;
+    private static JsonUtil nonNullBinder;
+
+    public JsonNode toNode(String jsonString){
+    	try {
+			return mapper.readTree(jsonString);
+		} catch (IOException e) {
+			logger.error("json parse error:" + jsonString,e);
+			return NullNode.getInstance();
+		}
+    }
+
+    public <T> T convertValue(Map<String, Object> objectMap, Class<T> clazz) {
+    	return mapper.convertValue(objectMap, clazz);
+    }
+    
+    public JsonNode objectToNode(Object object){
+    	return toNode(toJson(object));
+    }
+
+    private JsonUtil(Include inclusion) {
+        mapper = new ObjectMapper();
+        // controls which properties are included in serialized output
+        mapper.setSerializationInclusion(inclusion);
+        // ignore properties present in the JSON string but missing from the Java object;
+        // getDeserializationConfig().without(...) returns a new config without changing
+        // the mapper, so disable() is used instead
+        mapper.disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
+    }
+
+    /**
+     * Build a binder that serializes all properties to the JSON string.
+     */
+    public static synchronized JsonUtil buildNormalBinder() {
+        if(normalBinder==null){
+            normalBinder = new JsonUtil(Include.ALWAYS);
+        }
+        return normalBinder;
+    }
+
+    /**
+     * Build a binder that serializes only non-null properties to the JSON string.
+     */
+    public static synchronized JsonUtil buildNonNullBinder() {
+        if(nonNullBinder==null){
+            nonNullBinder = new JsonUtil(Include.NON_NULL);
+        }
+        return nonNullBinder;
+    }
+
+    /**
+     * Returns null if the JSON string is null or the literal "null".
+     * Returns an empty collection for "[]".
+     * To get an array back, pass an array type such as Product[].class.
+     */
+    public <T> T toBean(String jsonString, Class<T> clazz) {
+        if (StringUtils.isEmpty(jsonString)) {
+            return null;
+        }
+        try {
+            return mapper.readValue(jsonString, clazz);
+        } catch (IOException e) {
+            logger.warn("parse json string error:" + jsonString, e);
+            return null;
+        }
+    }
+
+    /**
+     * Supports generic wrapper return types.
+     * @param jsonString the JSON input
+     * @param reference a TypeReference for the wrapped type T, e.g. new TypeReference<List<T>>(){}
+     * @return the deserialized value, or null on empty input or parse failure
+     */
+    public <T> T toBean(String jsonString, TypeReference<T> reference) {
+        if (StringUtils.isEmpty(jsonString)) {
+            return null;
+        }
+        try {
+            return mapper.readValue(jsonString, reference);
+        } catch (IOException e) {
+            logger.warn("parse json string error:" + jsonString, e);
+            return null;
+        }
+    }
+
+    /**
+     * Returns "null" for a null object and "[]" for an empty collection.
+     */
+    public String toJson(Object object) {
+
+        try {
+            return mapper.writeValueAsString(object);
+        } catch (IOException e) {
+            logger.warn("write to json string error:" + object, e);
+            return null;
+        }
+    }
+
+    /**
+     * Expose the underlying mapper for further configuration or other serialization APIs.
+     */
+    public ObjectMapper getMapper() {
+        return mapper;
+    }
+
+}
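
The TypeReference overload is what lets generic targets such as lists round-trip; a sketch with a hypothetical User POJO:

    // without the TypeReference, Jackson would deserialize to List<LinkedHashMap>
    List<User> users = JsonUtil.buildNormalBinder()
            .toBean("[{\"userName\":\"jim\"}]", new TypeReference<List<User>>() {});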

+ 49 - 0
src/main/java/org/ccframe/commons/util/JwtUtil.java

@@ -0,0 +1,49 @@
+package org.ccframe.commons.util;
+
+import java.util.Date;
+import java.util.LinkedHashMap;
+
+import org.ccframe.commons.helper.SpringContextHelper;
+import org.ccframe.config.GlobalEx;
+import org.ccframe.subsys.core.service.ParamService;
+
+import io.jsonwebtoken.Jwts;
+import io.jsonwebtoken.SignatureAlgorithm;
+
+/**
+ * JWT utility. Token support comes from jjwt; add to gradle:
+ * api 'io.jsonwebtoken:jjwt:0.9.1'
+ * 
+ * @author JIM
+ *
+ */
+public class JwtUtil {
+
+	private static final String DATA_KEY = "data";
+
+	private static byte[] algSecretData;
+	
+	public static byte[] getAlgSecretData() { // the signing secret is loaded into memory on first use
+		if(algSecretData == null) {
+			algSecretData = SpringContextHelper.getBean(ParamService.class).getById(GlobalEx.PARAM_ID_CLIENT_JWT_ALG).getParamValue();
+		}
+		return algSecretData;
+	}
+
+	public static String createToken(Object dataObject, int maxAgeMinute) {
+		return createToken(dataObject, UtilDateTime.addMinutes(new Date(), maxAgeMinute));
+	}
+
+	public static String createToken(Object dataObject, Date expiresAt) {
+		SignatureAlgorithm signatureAlgorithm = SignatureAlgorithm.HS256;
+		return Jwts.builder().claim(DATA_KEY, dataObject).setExpiration(expiresAt).signWith(signatureAlgorithm, getAlgSecretData()).compact();
+	}
+	
+	public static <T> T decodeData(String token, Class<T> dataClass) {
+		LinkedHashMap<String, Object> dataMap = Jwts.parser().setSigningKey(getAlgSecretData()).parseClaimsJws(token).getBody().get(DATA_KEY, LinkedHashMap.class);
+		T result = JsonUtil.buildNormalBinder().convertValue(dataMap, dataClass);
+		return result;
+	}
+
+}
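
A token round-trip sketch (TokenUser stands in for whatever payload bean the caller issues; 30 is the token lifetime in minutes):

    // sign the payload into the "data" claim; expires 30 minutes from now
    String token = JwtUtil.createToken(tokenUser, 30);
    // verify the signature and map the claim back onto the bean
    TokenUser decoded = JwtUtil.decodeData(token, TokenUser.class);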

Some files were not shown because too many files changed in this diff