Browse Source

Add support for using a data source configured in Spring for the database-backed knowledge base repository

jacky6024 8 years ago
parent
commit
0ce9eb9d6f
31 changed files with 4278 additions and 7 deletions
  1. +32 -7
      urule-console/src/main/java/com/bstek/urule/console/repository/RepositoryBuilder.java
  2. +58 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/BaseDbFileSystem.java
  3. +93 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/DatabaseDataStore.java
  4. +1280 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/DbPersistenceManager.java
  5. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/db2.xml
  6. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/derby.xml
  7. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/mssql.xml
  8. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/mysql.xml
  9. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/oracle.xml
  10. +40 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/postgresql.xml
  11. +828 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/DatabaseJournal.java
  12. +167 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/DatabaseRecordIterator.java
  13. +66 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/MSSqlDatabaseJournal.java
  14. +131 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/OracleDatabaseJournal.java
  15. +279 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/DerbyPersistenceManager.java
  16. +69 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/H2PersistenceManager.java
  17. +67 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/MSSqlPersistenceManager.java
  18. +42 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/MySqlPersistenceManager.java
  19. +37 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/Oracle9PersistenceManager.java
  20. +167 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/OraclePersistenceManager.java
  21. +75 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/PostgreSQLPersistenceManager.java
  22. +54 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/store/DerbyDataStore.java
  23. +69 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/DB2FileSystem.java
  24. +91 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/DerbyFileSystem.java
  25. +78 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/MSSqlFileSystem.java
  26. +29 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/MysqlFileSystem.java
  27. +37 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/Oracle9FileSystem.java
  28. +256 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/OracleFileSystem.java
  29. +29 -0
      urule-console/src/main/java/com/bstek/urule/console/repository/database/system/PostgreSQLFileSystem.java
  30. +2 -0
      urule-console/src/main/resources/urule-console-context.properties
  31. +2 -0
      urule-console/src/main/resources/urule-console-context.xml

+ 32 - 7
urule-console/src/main/java/com/bstek/urule/console/repository/RepositoryBuilder.java

@@ -22,6 +22,7 @@ import java.util.logging.Logger;
 
 import javax.jcr.RepositoryException;
 import javax.servlet.ServletContext;
+import javax.sql.DataSource;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
@@ -74,6 +75,9 @@ public class RepositoryBuilder implements InitializingBean,ApplicationContextAwa
 	private RepositoryImpl repository;
 	private String repositoryXml;
 	private ApplicationContext applicationContext;
+	private String repositoryDatasourceName;
+	public static String databaseType;
+	public static DataSource datasource;
 	private Logger log=Logger.getLogger(RepositoryBuilder.class.getName());
 	public RepositoryImpl getRepository() {
 		return repository;
@@ -165,10 +169,11 @@ public class RepositoryBuilder implements InitializingBean,ApplicationContextAwa
 		return loginModuleConfig;
 	}
 	
-	private void initRepositoryByXml()throws Exception {
+	private void initRepositoryByXml(String xml)throws Exception {
+		log.info("Build repository from user custom xml file...");
 		InputStream inputStream=null;
 		try{
-			inputStream=this.applicationContext.getResource(repositoryXml).getInputStream();
+			inputStream=this.applicationContext.getResource(xml).getInputStream();
 			String tempRepoHomeDir=System.getProperty("java.io.tmpdir");
 			if(StringUtils.isNotBlank(tempRepoHomeDir) && tempRepoHomeDir.length()>1){
 				if(tempRepoHomeDir.endsWith("/") || tempRepoHomeDir.endsWith("\\")){
@@ -263,19 +268,32 @@ public class RepositoryBuilder implements InitializingBean,ApplicationContextAwa
 		log.info("Use \""+repoHomeDir+"\" as urule repository home directory.");
 	}
 	public void afterPropertiesSet() throws Exception {
+		if(StringUtils.isNotBlank(repositoryDatasourceName)){
+			RepositoryBuilder.datasource=(DataSource)this.applicationContext.getBean(repositoryDatasourceName);
+		}
 		if(StringUtils.isNotBlank(repoHomeDir) && !repoHomeDir.equals("${urule.repository.dir}")){
 			initRepositoryDir(applicationContext);			
-		}
-		if(StringUtils.isEmpty(repositoryXml)){
+		}else if(StringUtils.isNotBlank(repositoryXml)){
+			initRepositoryByXml(repositoryXml);
+		}else if(RepositoryBuilder.datasource!=null){
+			if(RepositoryBuilder.databaseType==null){
+				throw new RuleException("You need to configure the \"urule.repository.databasetype\" property when using a Spring datasource!");
+			}
+			initRepositoryFromSpringDatasource();
+		}else{
 			if(StringUtils.isBlank(repoHomeDir)){
 				throw new RuleException("You need config \"urule.repository.dir\" property for set repository home dir.");
 			}
 			initDefaultRepository();
-		}else{
-			log.info("Build repository from user custom xml file...");
-			initRepositoryByXml();
 		}
 	}
+	
+	private void initRepositoryFromSpringDatasource() throws Exception{
+		System.out.println("Init repository from spring datasource ["+repositoryDatasourceName+"] with database type ["+RepositoryBuilder.databaseType+"]...");
+		String xml="classpath:com/bstek/urule/console/repository/database/configs/"+RepositoryBuilder.databaseType+".xml";
+		initRepositoryByXml(xml);
+	}
+	
 	public void setRepoHomeDir(String repoHomeDir) {
 		this.repoHomeDir = repoHomeDir;
 	}
@@ -285,6 +303,13 @@ public class RepositoryBuilder implements InitializingBean,ApplicationContextAwa
 		this.repositoryXml = repositoryXml;
 	}
 	
+	public void setDatabaseType(String databaseType) {
+		RepositoryBuilder.databaseType = databaseType;
+	}
+	public void setRepositoryDatasourceName(String repositoryDatasourceName) {
+		this.repositoryDatasourceName = repositoryDatasourceName;
+	}
+	
 	public void destroy(){
 		System.out.println("Shutdown repository...");
 		repository.shutdown();
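
For reference, a minimal sketch of how the new Spring wiring could be used from Java-based Spring configuration. The bean name "uruleDataSource" and the MySQL connection settings are illustrative assumptions only; the commit itself wires these properties through urule-console-context.xml and urule-console-context.properties, whose contents are not shown in this diff.

import javax.sql.DataSource;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.datasource.DriverManagerDataSource;

import com.bstek.urule.console.repository.RepositoryBuilder;

@Configuration
public class UruleRepositoryConfig {

    // The data source bean; its name must match repositoryDatasourceName below.
    // Driver, URL and credentials are placeholders for illustration.
    @Bean(name = "uruleDataSource")
    public DataSource uruleDataSource() {
        DriverManagerDataSource ds = new DriverManagerDataSource();
        ds.setDriverClassName("com.mysql.jdbc.Driver");
        ds.setUrl("jdbc:mysql://localhost:3306/urule_repo");
        ds.setUsername("urule");
        ds.setPassword("secret");
        return ds;
    }

    // With both properties set, afterPropertiesSet() looks up the bean, stores it
    // in RepositoryBuilder.datasource, and builds the repository from
    // classpath:com/bstek/urule/console/repository/database/configs/mysql.xml.
    @Bean(destroyMethod = "destroy")
    public RepositoryBuilder repositoryBuilder() {
        RepositoryBuilder builder = new RepositoryBuilder();
        builder.setRepositoryDatasourceName("uruleDataSource");
        builder.setDatabaseType("mysql");
        return builder;
    }
}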

+ 58 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/BaseDbFileSystem.java

@@ -0,0 +1,58 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database;
+
+import org.apache.jackrabbit.core.fs.FileSystemException;
+import org.apache.jackrabbit.core.fs.db.DbFileSystem;
+
+import com.bstek.urule.console.repository.RepositoryBuilder;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public abstract class BaseDbFileSystem extends DbFileSystem {
+	@Override
+	public void init() throws FileSystemException {
+        if (initialized) {
+            throw new IllegalStateException("already initialized");
+        }
+        try {
+        	setSchema(databaseType());
+            conHelper = createConnectionHelper(RepositoryBuilder.datasource);
+
+            // make sure schemaObjectPrefix consists of legal name characters only
+            schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
+
+            // check if schema objects exist and create them if necessary
+            if (isSchemaCheckEnabled()) {
+                createCheckSchemaOperation().run();
+            }
+
+            // build sql statements
+            buildSQLStatements();
+
+            // finally verify that there's a file system root entry
+            verifyRootExists();
+
+            initialized = true;
+        } catch (Exception e) {
+            String msg = "failed to initialize file system";
+            throw new FileSystemException(msg, e);
+        }
+	}
+	public abstract String databaseType();
+}
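
A concrete subclass only has to name the database type: databaseType() supplies the schema name that init() passes to setSchema(), which selects the matching DDL variant in Jackrabbit, while the JDBC connection always comes from RepositoryBuilder.datasource. The commit's own MysqlFileSystem (29 lines, listed above) presumably has this shape; the sketch below is an illustrative reconstruction rather than the committed file.

package com.bstek.urule.console.repository.database.system;

import com.bstek.urule.console.repository.database.BaseDbFileSystem;

public class MysqlFileSystem extends BaseDbFileSystem {
    // Selects the "mysql" schema/DDL variant; the connection itself is taken
    // from the Spring-configured datasource in BaseDbFileSystem.init().
    @Override
    public String databaseType() {
        return "mysql";
    }
}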

+ 93 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/DatabaseDataStore.java

@@ -0,0 +1,93 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.data.db.DbDataStore;
+
+import com.bstek.urule.console.repository.RepositoryBuilder;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class DatabaseDataStore extends DbDataStore {
+	@Override
+	public synchronized void init(String homeDir) throws DataStoreException {
+        try {
+            initDatabaseType();
+            conHelper = createConnectionHelper(RepositoryBuilder.datasource);
+            if (isSchemaCheckEnabled()) {
+                createCheckSchemaOperation().run();
+            }
+        } catch (Exception e) {
+            throw convert("Can not init data store, driver=" + driver + " url=" + url + " user=" + user +
+                    " schemaObjectPrefix=" + schemaObjectPrefix + " tableSQL=" + tableSQL + " createTableSQL=" + createTableSQL, e);
+        }
+	}
+	
+	@Override
+	protected void initDatabaseType() throws DataStoreException {
+		databaseType=RepositoryBuilder.databaseType;
+        InputStream in =
+            DbDataStore.class.getResourceAsStream(databaseType + ".properties");
+        if (in == null) {
+        	String msg =
+        			"Configuration error: The resource '" + databaseType
+        			+ ".properties' could not be found;"
+        			+ " Please verify the databaseType property";
+        	throw new DataStoreException(msg);
+        }
+        Properties prop = new Properties();
+        try {
+            try {
+                prop.load(in);
+            } finally {
+                in.close();
+            }
+        } catch (IOException e) {
+            String msg = "Configuration error: Could not read properties '" + databaseType + ".properties'";
+            throw new DataStoreException(msg, e);
+        }
+        if (driver == null) {
+            driver = getProperty(prop, "driver", driver);
+        }
+        tableSQL = getProperty(prop, "table", tableSQL);
+        createTableSQL = getProperty(prop, "createTable", createTableSQL);
+        insertTempSQL = getProperty(prop, "insertTemp", insertTempSQL);
+        updateDataSQL = getProperty(prop, "updateData", updateDataSQL);
+        updateLastModifiedSQL = getProperty(prop, "updateLastModified", updateLastModifiedSQL);
+        updateSQL = getProperty(prop, "update", updateSQL);
+        deleteSQL = getProperty(prop, "delete", deleteSQL);
+        deleteOlderSQL = getProperty(prop, "deleteOlder", deleteOlderSQL);
+        selectMetaSQL = getProperty(prop, "selectMeta", selectMetaSQL);
+        selectAllSQL = getProperty(prop, "selectAll", selectAllSQL);
+        selectDataSQL = getProperty(prop, "selectData", selectDataSQL);
+        storeStream = getProperty(prop, "storeStream", storeStream);
+        if (!STORE_SIZE_MINUS_ONE.equals(storeStream)
+                && !STORE_TEMP_FILE.equals(storeStream)
+                && !STORE_SIZE_MAX.equals(storeStream)) {
+            String msg = "Unsupported Stream store mechanism: " + storeStream
+                    + " supported are: " + STORE_SIZE_MINUS_ONE + ", "
+                    + STORE_TEMP_FILE + ", " + STORE_SIZE_MAX;
+            throw new DataStoreException(msg);
+        }
+	}
+}
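
Note that initDatabaseType() resolves its SQL template relative to Jackrabbit's DbDataStore class, so the value of urule.repository.databasetype must match one of the <type>.properties resources shipped inside jackrabbit-core (for example mysql.properties). A small sanity check along those lines, assuming jackrabbit-core is on the classpath:

import java.io.InputStream;

import org.apache.jackrabbit.core.data.db.DbDataStore;

public class DataStoreTypeCheck {
    public static void main(String[] args) throws Exception {
        String databaseType = args.length > 0 ? args[0] : "mysql";
        // Same lookup DatabaseDataStore.initDatabaseType() performs: the resource
        // is expected next to DbDataStore inside jackrabbit-core.
        InputStream in = DbDataStore.class.getResourceAsStream(databaseType + ".properties");
        if (in == null) {
            System.out.println(databaseType + ".properties not found - check the databaseType property");
        } else {
            in.close();
            System.out.println(databaseType + ".properties found on the classpath");
        }
    }
}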

+ 1280 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/DbPersistenceManager.java

@@ -0,0 +1,1280 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.jcr.RepositoryException;
+import javax.sql.DataSource;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.fs.FileSystem;
+import org.apache.jackrabbit.core.fs.FileSystemResource;
+import org.apache.jackrabbit.core.fs.local.LocalFileSystem;
+import org.apache.jackrabbit.core.id.NodeId;
+import org.apache.jackrabbit.core.id.PropertyId;
+import org.apache.jackrabbit.core.persistence.PMContext;
+import org.apache.jackrabbit.core.persistence.bundle.AbstractBundlePersistenceManager;
+import org.apache.jackrabbit.core.persistence.pool.BundleDbPersistenceManager;
+import org.apache.jackrabbit.core.persistence.pool.DbNameIndex;
+import org.apache.jackrabbit.core.persistence.util.BLOBStore;
+import org.apache.jackrabbit.core.persistence.util.BundleBinding;
+import org.apache.jackrabbit.core.persistence.util.ErrorHandling;
+import org.apache.jackrabbit.core.persistence.util.FileSystemBLOBStore;
+import org.apache.jackrabbit.core.persistence.util.NodeInfo;
+import org.apache.jackrabbit.core.persistence.util.NodePropBundle;
+import org.apache.jackrabbit.core.persistence.util.Serializer;
+import org.apache.jackrabbit.core.state.ChangeLog;
+import org.apache.jackrabbit.core.state.ItemStateException;
+import org.apache.jackrabbit.core.state.NoSuchItemStateException;
+import org.apache.jackrabbit.core.state.NodeReferences;
+import org.apache.jackrabbit.core.util.StringIndex;
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.core.util.db.ConnectionFactory;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.DatabaseAware;
+import org.apache.jackrabbit.core.util.db.DbUtility;
+import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.bstek.urule.console.repository.RepositoryBuilder;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class DbPersistenceManager extends AbstractBundlePersistenceManager implements DatabaseAware {
+
+    /** the default logger */
+    private static Logger log = LoggerFactory.getLogger(BundleDbPersistenceManager.class);
+
+    /** storage model modifier: binary keys */
+    public static final int SM_BINARY_KEYS = 1;
+
+    /** storage model modifier: longlong keys */
+    public static final int SM_LONGLONG_KEYS = 2;
+
+    /** flag indicating if this manager was initialized */
+    protected boolean initialized;
+
+    /** the jdbc driver name */
+    protected String driver;
+
+    /** the jdbc url string */
+    protected String url;
+
+    /** the jdbc user */
+    protected String user;
+
+    /** the jdbc password */
+    protected String password;
+
+    /** the database type */
+    protected String databaseType;
+
+    /** the logical name of the data source to use */
+    protected String dataSourceName;
+
+    /** the {@link ConnectionHelper} set in the {@link #init(PMContext)} method */
+    protected ConnectionHelper conHelper;
+
+    /** the prefix for the database objects */
+    protected String schemaObjectPrefix;
+
+    /** flag indicating if a consistency check should be issued during startup */
+    protected boolean consistencyCheck;
+
+    /** flag indicating if the consistency check should attempt to fix issues */
+    protected boolean consistencyFix;
+
+    /** initial size of buffer used to serialize objects */
+    protected static final int INITIAL_BUFFER_SIZE = 1024;
+
+    /** indicates if uses (filesystem) blob store */
+    protected boolean externalBLOBs;
+
+    /** indicates whether to block if the database connection is lost */
+    protected boolean blockOnConnectionLoss;
+
+    // SQL statements for bundle management
+    protected String bundleInsertSQL;
+    protected String bundleUpdateSQL;
+    protected String bundleSelectSQL;
+    protected String bundleDeleteSQL;
+    protected String bundleSelectAllIdsFromSQL;
+    protected String bundleSelectAllIdsSQL;
+    protected String bundleSelectAllBundlesFromSQL;
+    protected String bundleSelectAllBundlesSQL;
+
+    // SQL statements for NodeReference management
+    protected String nodeReferenceInsertSQL;
+    protected String nodeReferenceUpdateSQL;
+    protected String nodeReferenceSelectSQL;
+    protected String nodeReferenceDeleteSQL;
+
+    /** file system where BLOB data is stored */
+    protected CloseableBLOBStore blobStore;
+
+    /** the index for local names */
+    private StringIndex nameIndex;
+
+    /**
+     * the minimum size of a property until it gets written to the blob store
+     * @see #setMinBlobSize(String)
+     */
+    private int minBlobSize = 0x1000;
+
+    /**
+     * flag for error handling
+     */
+    protected ErrorHandling errorHandling = new ErrorHandling();
+
+    /**
+     * the bundle binding
+     */
+    protected BundleBinding binding;
+
+    /**
+     * the name of this persistence manager
+     */
+    private String name = super.toString();
+
+    /**
+     * Whether the schema check must be done during initialization.
+     */
+    private boolean schemaCheckEnabled = true;
+//    /**
+//     * The repository's {@link ConnectionFactory}.
+//     */
+//    private ConnectionFactory connectionFactory;
+
+    /**
+     * {@inheritDoc}
+     */
+    public void setConnectionFactory(ConnectionFactory connectionFactory) {
+        //this.connectionFactory = connectionFactory;
+    }
+
+    /**
+     * Returns the configured JDBC connection url.
+     * @return the configured JDBC connection url.
+     */
+    public String getUrl() {
+        return url;
+    }
+
+    /**
+     * Sets the JDBC connection URL.
+     * The connection can be created using a JNDI Data Source as well.
+     * To do that, the driver class name must reference a javax.naming.Context class
+     * (for example javax.naming.InitialContext), and the URL must be the JNDI URL
+     * (for example java:comp/env/jdbc/Test).
+     *
+     * @param url the url to set.
+     */
+    public void setUrl(String url) {
+        this.url = url;
+    }
+
+    /**
+     * Returns the configured user that is used to establish JDBC connections.
+     * @return the JDBC user.
+     */
+    public String getUser() {
+        return user;
+    }
+
+    /**
+     * Sets the user name that will be used to establish JDBC connections.
+     * @param user the user name.
+     */
+    public void setUser(String user) {
+        this.user = user;
+    }
+
+    /**
+     * Returns the configured password that is used to establish JDBC connections.
+     * @return the password.
+     */
+    public String getPassword() {
+        return password;
+    }
+
+    /**
+     * Sets the password that will be used to establish JDBC connections.
+     * @param password the password for the connection
+     */
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    /**
+     * Returns the class name of the JDBC driver.
+     * @return the class name of the JDBC driver.
+     */
+    public String getDriver() {
+        return driver;
+    }
+
+    /**
+     * Sets the class name of the JDBC driver. The driver class will be loaded
+     * during {@link #init(PMContext) init} in order to assure the existence.
+     * If no driver is specified, the default driver for the database is used.
+     *
+     * @param driver the class name of the driver
+     */
+    public void setDriver(String driver) {
+        this.driver = driver;
+    }
+
+    /**
+     * Returns the configured schema object prefix.
+     * @return the configured schema object prefix.
+     */
+    public String getSchemaObjectPrefix() {
+        return schemaObjectPrefix;
+    }
+
+    /**
+     * Sets the schema object prefix. This string is used to prefix all schema
+     * objects, like tables and indexes. this is useful, if several persistence
+     * managers use the same database.
+     *
+     * @param schemaObjectPrefix the prefix for schema objects.
+     */
+    public void setSchemaObjectPrefix(String schemaObjectPrefix) {
+        // make sure prefix is all uppercase
+        this.schemaObjectPrefix = schemaObjectPrefix.toUpperCase();
+    }
+
+    /**
+     * Returns the configured database type name.
+     * @deprecated
+     * This method is deprecated; {@link #getDatabaseType()} should be used instead.
+     * 
+     * @return the database type name.
+     */
+    public String getSchema() {
+        return databaseType;
+    }
+
+    /**
+     * Returns the configured database type name.
+     * @return the database type name.
+     */
+    public String getDatabaseType() {
+        return databaseType;
+    }
+
+    /**
+     * Sets the database type. This identifier is used to load and execute
+     * the respective .ddl resource in order to create the required schema
+     * objects.
+     * @deprecated
+     * This method is deprecated; {@link #setDatabaseType(String)} should be used instead.
+     *
+     * @param databaseType database type name
+     */
+    public void setSchema(String databaseType) {
+        this.databaseType = databaseType;
+    }
+    
+    /**
+     * Sets the database type. This identifier is used to load and execute
+     * the respective .ddl resource in order to create the required schema
+     * objects.
+     *
+     * @param databaseType database type name
+     */
+    public void setDatabaseType(String databaseType) {
+        this.databaseType = databaseType;
+    }
+
+    public String getDataSourceName() {
+        return dataSourceName;
+    }
+
+    public void setDataSourceName(String dataSourceName) {
+        this.dataSourceName = dataSourceName;
+    }
+
+    /**
+     * Returns if uses external (filesystem) blob store.
+     * @return if uses external (filesystem) blob store.
+     */
+    public boolean isExternalBLOBs() {
+        return externalBLOBs;
+    }
+
+    /**
+     * Sets the flag for external (filesystem) blob store usage.
+     * @param externalBLOBs a value of "true" indicates that an external blob
+     *        store is to be used.
+     */
+    public void setExternalBLOBs(boolean externalBLOBs) {
+        this.externalBLOBs = externalBLOBs;
+    }
+
+    /**
+     * Checks if consistency check is enabled.
+     * @return <code>true</code> if consistency check is enabled.
+     */
+    public String getConsistencyCheck() {
+        return Boolean.toString(consistencyCheck);
+    }
+
+    /**
+     * Defines if a consistency check is to be performed on initialization.
+     * @param consistencyCheck the consistency check flag.
+     */
+    public void setConsistencyCheck(String consistencyCheck) {
+        this.consistencyCheck = Boolean.valueOf(consistencyCheck).booleanValue();
+    }
+
+    /**
+     * Checks if consistency fix is enabled.
+     * @return <code>true</code> if consistency fix is enabled.
+     */
+    public String getConsistencyFix() {
+        return Boolean.toString(consistencyFix);
+    }
+
+    /**
+     * Defines if the consistency check should attempt to fix issues that
+     * it finds.
+     *
+     * @param consistencyFix the consistency fix flag.
+     */
+    public void setConsistencyFix(String consistencyFix) {
+        this.consistencyFix = Boolean.valueOf(consistencyFix).booleanValue();
+    }
+
+    /**
+     * Returns the minimum blob size in bytes.
+     * @return the minimum blob size in bytes.
+     */
+    public String getMinBlobSize() {
+        return String.valueOf(minBlobSize);
+    }
+
+    /**
+     * Sets the minimum blob size. This size defines the threshold of which
+     * size a property is included in the bundle or is stored in the blob store.
+     *
+     * @param minBlobSize the minimum blob size in bytes.
+     */
+    public void setMinBlobSize(String minBlobSize) {
+        this.minBlobSize = Integer.decode(minBlobSize).intValue();
+    }
+
+    /**
+     * Sets the error handling behaviour of this manager. See {@link ErrorHandling}
+     * for details about the flags.
+     *
+     * @param errorHandling the error handling flags
+     */
+    public void setErrorHandling(String errorHandling) {
+        this.errorHandling = new ErrorHandling(errorHandling);
+    }
+
+    /**
+     * Returns the error handling configuration of this manager
+     * @return the error handling configuration of this manager
+     */
+    public String getErrorHandling() {
+        return errorHandling.toString();
+    }
+
+    public void setBlockOnConnectionLoss(String block) {
+        this.blockOnConnectionLoss = Boolean.valueOf(block).booleanValue();
+    }
+
+    public String getBlockOnConnectionLoss() {
+        return Boolean.toString(blockOnConnectionLoss);
+    }
+
+    /**
+     * Returns <code>true</code> if the blobs are stored in the DB.
+     * @return <code>true</code> if the blobs are stored in the DB.
+     */
+    public boolean useDbBlobStore() {
+        return !externalBLOBs;
+    }
+
+    /**
+     * Returns <code>true</code> if the blobs are stored in the local fs.
+     * @return <code>true</code> if the blobs are stored in the local fs.
+     */
+    public boolean useLocalFsBlobStore() {
+        return externalBLOBs;
+    }
+
+    /**
+     * @return whether the schema check is enabled
+     */
+    public final boolean isSchemaCheckEnabled() {
+        return schemaCheckEnabled;
+    }
+
+    /**
+     * @param enabled set whether the schema check is enabled
+     */
+    public final void setSchemaCheckEnabled(boolean enabled) {
+        schemaCheckEnabled = enabled;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * Basically wraps a JDBC transaction around super.store().
+     * 
+     * FIXME: the retry logic is almost a duplicate of {@code ConnectionHelper.RetryManager}.
+     */
+    public synchronized void store(final ChangeLog changeLog) throws ItemStateException {
+        int failures = 0;
+        ItemStateException lastException = null;
+        boolean sleepInterrupted = false;
+        while (!sleepInterrupted && (blockOnConnectionLoss || failures <= 1)) {
+            try {
+                conHelper.startBatch();
+                super.store(changeLog);
+                conHelper.endBatch(true);
+                return;
+            } catch (SQLException e) {
+                // Either startBatch or stopBatch threw it: either way the
+                // transaction was not persisted and no action needs to be taken.
+                lastException = new ItemStateException(e.getMessage(), e);
+            } catch (ItemStateException e) {
+                // store call threw it: we need to cancel the transaction
+                lastException = e;
+                try {
+                    conHelper.endBatch(false);
+                } catch (SQLException e2) {
+                    DbUtility.logException("rollback failed", e2);
+                }
+
+                // if we got here due to a constraint violation and we
+                // are running in test mode, we really want to stop
+                assert !isIntegrityConstraintViolation(e.getCause());
+            }
+            failures++;
+            log.error("Failed to persist ChangeLog (stacktrace on DEBUG log level), blockOnConnectionLoss = "
+                    + blockOnConnectionLoss + ": " + lastException);
+            log.debug("Failed to persist ChangeLog", lastException);
+            if (blockOnConnectionLoss || failures <= 1) { // if we're going to try again
+                try {
+                    Thread.sleep(100);
+                } catch (InterruptedException e1) {
+                    Thread.currentThread().interrupt();
+                    sleepInterrupted = true;
+                    log.error("Interrupted: canceling retry of ChangeLog storage");
+                }
+            }
+        }
+        throw lastException;
+    }
+
+    private boolean isIntegrityConstraintViolation(Throwable t) {
+        if (t instanceof SQLException) {
+            String state = ((SQLException) t).getSQLState();
+            return state != null && state.startsWith("23");
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(PMContext context) throws Exception {
+        if (initialized) {
+            throw new IllegalStateException("already initialized");
+        }
+        super.init(context);
+
+        conHelper = createConnectionHelper(getDataSource());
+        
+        this.name = context.getHomeDir().getName();        
+
+        // make sure schemaObjectPrefix consists of legal name characters only
+        schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
+
+        // check if schema objects exist and create them if necessary
+        if (isSchemaCheckEnabled()) {
+            createCheckSchemaOperation().run();
+        }
+
+        // create correct blob store
+        blobStore = createBlobStore();
+
+        buildSQLStatements();
+
+        // load namespaces
+        binding = new BundleBinding(errorHandling, blobStore, getNsIndex(), getNameIndex(), context.getDataStore());
+        binding.setMinBlobSize(minBlobSize);
+
+        initialized = true;
+
+        if (consistencyCheck) {
+            // check all bundles
+            checkConsistency(null, true, consistencyFix);
+        }
+                
+    }
+
+    private DataSource getDataSource() throws Exception {
+    	setDatabaseType(RepositoryBuilder.databaseType);
+    	return RepositoryBuilder.datasource;
+    }
+
+    /**
+     * This method is called from the {@link #init(PMContext)} method of this class and returns a
+     * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may
+     * override it to return a specialized connection helper.
+     * 
+     * @param dataSrc the {@link DataSource} of this persistence manager
+     * @return a {@link ConnectionHelper}
+     * @throws Exception on error
+     */
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        return new ConnectionHelper(dataSrc, blockOnConnectionLoss);
+    }
+
+    /**
+     * This method is called from {@link #init(PMContext)} after the
+     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
+     * Subclasses can overrride this implementation to get a customized implementation.
+     * 
+     * @return a new {@link CheckSchemaOperation} instance
+     */
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        InputStream in =
+            AbstractBundlePersistenceManager.class.getResourceAsStream(
+                    databaseType + ".ddl");
+        return new CheckSchemaOperation(conHelper, in, schemaObjectPrefix + "BUNDLE").addVariableReplacement(
+            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected BLOBStore getBlobStore() {
+        return blobStore;
+    }
+
+    /**
+     * Creates a suitable blobstore
+     * @return a blobstore
+     * @throws Exception if an unspecified error occurs
+     */
+    protected CloseableBLOBStore createBlobStore() throws Exception {
+        if (useLocalFsBlobStore()) {
+            return createLocalFSBlobStore(context);
+        } else {
+            return createDBBlobStore(context);
+        }
+    }
+
+    /**
+     * Returns the local name index
+     * @return the local name index
+     * @throws IllegalStateException if an error occurs.
+     */
+    public StringIndex getNameIndex() {
+        try {
+            if (nameIndex == null) {
+                FileSystemResource res = new FileSystemResource(context.getFileSystem(), RES_NAME_INDEX);
+                if (res.exists()) {
+                    nameIndex = super.getNameIndex();
+                } else {
+                    // create db nameindex
+                    nameIndex = createDbNameIndex();
+                }
+            }
+            return nameIndex;
+        } catch (Exception e) {
+            IllegalStateException exception =
+                new IllegalStateException("Unable to create name index");
+            exception.initCause(e);
+            throw exception;
+        }
+    }
+
+    /**
+     * Returns a new instance of a DbNameIndex.
+     * @return a new instance of a DbNameIndex.
+     * @throws SQLException if an SQL error occurs.
+     */
+    protected DbNameIndex createDbNameIndex() throws SQLException {
+        return new DbNameIndex(conHelper, schemaObjectPrefix);
+    }
+
+    /**
+     * returns the storage model
+     * @return the storage model
+     */
+    public int getStorageModel() {
+        return SM_BINARY_KEYS;
+    }
+
+    /**
+     * Creates a blob store that is based on a local fs. This is called by
+     * init if {@link #useLocalFsBlobStore()} returns <code>true</code>.
+     *
+     * @param context the persistence manager context
+     * @return a blob store
+     * @throws Exception if an error occurs.
+     */
+    protected CloseableBLOBStore createLocalFSBlobStore(PMContext context)
+            throws Exception {
+        /**
+         * store blob's in local file system in a sub directory
+         * of the workspace home directory
+         */
+        LocalFileSystem blobFS = new LocalFileSystem();
+        blobFS.setRoot(new File(context.getHomeDir(), "blobs"));
+        blobFS.init();
+        return new FSBlobStore(blobFS);
+    }
+
+    /**
+     * Creates a blob store that uses the database. This is called by
+     * init if {@link #useDbBlobStore()} returns <code>true</code>.
+     *
+     * @param context the persistence manager context
+     *
+     * @return a blob store
+     * @throws Exception if an error occurs.
+     */
+    protected CloseableBLOBStore createDBBlobStore(PMContext context)
+            throws Exception {
+        return new DbBlobStore();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public synchronized void close() throws Exception {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        try {
+            if (nameIndex instanceof DbNameIndex) {
+                ((DbNameIndex) nameIndex).close();
+            }            
+            // close blob store
+            blobStore.close();
+            blobStore = null;
+            super.close();
+        } finally {
+            initialized = false;
+        }
+    }
+
+    /**
+     * Constructs a parameter list for a PreparedStatement
+     * for the given node identifier.
+     *
+     * @param id the node id
+     * @return a list of Objects
+     */
+    protected Object[] getKey(NodeId id) {
+        if (getStorageModel() == SM_BINARY_KEYS) {
+            return new Object[] { id.getRawBytes() };
+        } else {
+            return new Object[] {
+                    id.getMostSignificantBits(), id.getLeastSignificantBits() };
+        }
+    }
+
+    /**
+     * Creates a parameter array for an SQL statement that needs
+     * (1) a node identifier, and (2) another parameter.
+     *
+     * @param id the node id
+     * @param p the other parameter
+     * @param before whether the other parameter should be before the uuid parameter
+     * @return an Object array that represents the parameters
+     */
+    protected Object[] createParams(NodeId id, Object p, boolean before) {
+
+        // Create the key
+        List<Object> key = new ArrayList<Object>();
+        if (getStorageModel() == SM_BINARY_KEYS) {
+            key.add(id.getRawBytes());
+        } else {
+            key.add(id.getMostSignificantBits());
+            key.add(id.getLeastSignificantBits());
+        }
+
+        // Create the parameters
+        List<Object> params = new ArrayList<Object>();
+        if (before) {
+            params.add(p);
+            params.addAll(key);
+        } else {
+            params.addAll(key);
+            params.add(p);
+        }
+
+        return params.toArray();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public synchronized List<NodeId> getAllNodeIds(NodeId bigger, int maxCount)
+            throws ItemStateException, RepositoryException {
+        ResultSet rs = null;
+        try {
+            String sql = bundleSelectAllIdsSQL;
+            NodeId lowId = null;
+            Object[] keys = new Object[0];
+            if (bigger != null) {
+                sql = bundleSelectAllIdsFromSQL;
+                lowId = bigger;
+                keys = getKey(bigger);
+            }
+            if (getStorageModel() == SM_LONGLONG_KEYS  && maxCount > 0) {
+                // get some more rows, in case the first row is smaller
+                // only required for SM_LONGLONG_KEYS
+                // probability is very low to get the wrong first key, < 1 : 2^64
+                // see also bundleSelectAllIdsFrom SQL statement
+                maxCount += 10;
+            }
+            rs = conHelper.exec(sql, keys, false, maxCount);
+            ArrayList<NodeId> result = new ArrayList<NodeId>();
+            while ((maxCount == 0 || result.size() < maxCount) && rs.next()) {
+                NodeId current;
+                if (getStorageModel() == SM_BINARY_KEYS) {
+                    current = new NodeId(rs.getBytes(1));
+                } else {
+                    long high = rs.getLong(1);
+                    long low = rs.getLong(2);
+                    current = new NodeId(high, low);
+                    if (lowId != null) {
+                        // skip the keys that are smaller or equal (see above, maxCount += 10)
+                        // only required for SM_LONGLONG_KEYS
+                        if (current.compareTo(lowId) <= 0) {
+                            continue;
+                        }
+                    }
+                }
+                result.add(current);
+            }
+            return result;
+        } catch (SQLException e) {
+            String msg = "getAllNodeIds failed.";
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            DbUtility.close(rs);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public synchronized Map<NodeId, NodeInfo> getAllNodeInfos(NodeId bigger, int maxCount) throws ItemStateException {
+        ResultSet rs = null;
+        try {
+            String sql = bundleSelectAllBundlesSQL;
+            NodeId lowId = null;
+            Object[] keys = new Object[0];
+            if (bigger != null) {
+                sql = bundleSelectAllBundlesFromSQL;
+                lowId = bigger;
+                keys = getKey(bigger);
+            }
+            if (getStorageModel() == SM_LONGLONG_KEYS && maxCount > 0) {
+                // get some more rows, in case the first row is smaller
+                // only required for SM_LONGLONG_KEYS
+                // probability is very low to get the wrong first key, < 1 : 2^64
+                // see also bundleSelectAllIdsFrom SQL statement
+                maxCount += 10;
+            }
+            rs = conHelper.exec(sql, keys, false, maxCount);
+            Map<NodeId, NodeInfo> result = new LinkedHashMap<NodeId, NodeInfo>(maxCount);
+            while ((maxCount == 0 || result.size() < maxCount) && rs.next()) {
+                NodeId current;
+                if (getStorageModel() == SM_BINARY_KEYS) {
+                    current = new NodeId(rs.getBytes(1));
+                } else {
+                    long high = rs.getLong(1);
+                    long low = rs.getLong(2);
+                    current = new NodeId(high, low);
+                }
+                if (getStorageModel() == SM_LONGLONG_KEYS && lowId != null) {
+                    // skip the keys that are smaller or equal (see above, maxCount += 10)
+                    if (current.compareTo(lowId) <= 0) {
+                        continue;
+                    }
+                }
+                NodePropBundle bundle = readBundle(current, rs, getStorageModel() == SM_LONGLONG_KEYS ? 3 : 2);
+                NodeInfo nodeInfo = new NodeInfo(bundle);
+                result.put(nodeInfo.getId(), nodeInfo);
+            }
+            return result;
+        } catch (SQLException e) {
+            String msg = "getAllNodeIds failed.";
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            DbUtility.close(rs);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected NodePropBundle loadBundle(NodeId id) throws ItemStateException {
+        try {
+            ResultSet rs =
+                conHelper.exec(bundleSelectSQL, getKey(id), false, 0);
+            try {
+                if (rs != null && rs.next()) {
+                    return readBundle(id, rs, 1);
+                } else {
+                    return null;
+                }
+            } finally {
+            	if (rs != null) {
+            		rs.close();
+            	}
+            }
+        } catch (SQLException e) {
+        	String msg = "failed to read bundle (stacktrace on DEBUG log level): " + id + ": " + e; 
+            log.error(msg);
+            log.debug("failed to read bundle: " + id, e);
+            throw new ItemStateException(msg, e);
+        }
+    }
+
+    /**
+     * Reads and parses a bundle from the BLOB in the given column of the
+     * current row of the given result set. This is a helper method to
+     * circumvent issues JCR-1039 and JCR-1474.
+     *
+     * @param id bundle identifier
+     * @param rs result set
+     * @param column BLOB column
+     * @return parsed bundle
+     * @throws SQLException if the bundle can not be read or parsed
+     */
+    private NodePropBundle readBundle(NodeId id, ResultSet rs, int column)
+            throws SQLException {
+        try {
+            InputStream in;
+            if (rs.getMetaData().getColumnType(column) == Types.BLOB) {
+                in = rs.getBlob(column).getBinaryStream();
+            } else {
+                in = rs.getBinaryStream(column);
+            }
+            try {
+                return binding.readBundle(in, id);
+            } finally {
+                in.close();
+            }
+        } catch (IOException e) {
+            SQLException exception =
+                new SQLException("Failed to parse bundle " + id);
+            exception.initCause(e);
+            throw exception;
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected synchronized void storeBundle(NodePropBundle bundle) throws ItemStateException {
+        try {
+            ByteArrayOutputStream out =
+                new ByteArrayOutputStream(INITIAL_BUFFER_SIZE);
+            binding.writeBundle(out, bundle);
+
+            String sql = bundle.isNew() ? bundleInsertSQL : bundleUpdateSQL;
+            Object[] params = createParams(bundle.getId(), out.toByteArray(), true);
+            conHelper.update(sql, params);
+        } catch (Exception e) {
+            String msg;
+
+            if (isIntegrityConstraintViolation(e)) {
+                // we should never get an integrity constraint violation here
+                // other PMs may not be able to detect this and end up with
+                // corrupted data
+                msg = "FATAL error while writing the bundle: " + bundle.getId();
+            } else {
+                msg = "failed to write bundle: " + bundle.getId();
+            }
+
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        }
+   }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected synchronized void destroyBundle(NodePropBundle bundle) throws ItemStateException {
+        try {
+            conHelper.update(bundleDeleteSQL, getKey(bundle.getId()));
+        } catch (Exception e) {
+            if (e instanceof NoSuchItemStateException) {
+                throw (NoSuchItemStateException) e;
+            }
+            String msg = "failed to delete bundle: " + bundle.getId();
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public synchronized NodeReferences loadReferencesTo(NodeId targetId)
+            throws NoSuchItemStateException, ItemStateException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        ResultSet rs = null;
+        InputStream in = null;
+        try {
+            rs = conHelper.exec(nodeReferenceSelectSQL, getKey(targetId), false, 0);
+            if (!rs.next()) {
+                throw new NoSuchItemStateException(targetId.toString());
+            }
+
+            in = rs.getBinaryStream(1);
+            NodeReferences refs = new NodeReferences(targetId);
+            Serializer.deserialize(refs, in);
+
+            return refs;
+        } catch (Exception e) {
+            if (e instanceof NoSuchItemStateException) {
+                throw (NoSuchItemStateException) e;
+            }
+            String msg = "failed to read references: " + targetId;
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            IOUtils.closeQuietly(in);
+            DbUtility.close(rs);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * This method uses shared <code>PreparedStatements</code>, which must
+     * be used strictly sequentially. Because this method synchronizes on the
+     * persistence manager instance, there is no need to synchronize on the
+     * shared statement. If the method would not be synchronized, the shared
+     * statement must be synchronized.
+     */
+    public synchronized void store(NodeReferences refs) throws ItemStateException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        // check if insert or update
+        boolean update = existsReferencesTo(refs.getTargetId());
+        String sql = (update) ? nodeReferenceUpdateSQL : nodeReferenceInsertSQL;
+
+        try {
+            ByteArrayOutputStream out =
+                    new ByteArrayOutputStream(INITIAL_BUFFER_SIZE);
+            // serialize references
+            Serializer.serialize(refs, out);
+
+            Object[] params = createParams(refs.getTargetId(), out.toByteArray(), true);
+            conHelper.exec(sql, params);
+            
+            // there's no need to close a ByteArrayOutputStream
+            //out.close();
+        } catch (Exception e) {
+            String msg = "failed to write " + refs;
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public synchronized void destroy(NodeReferences refs) throws ItemStateException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        try {
+            conHelper.exec(nodeReferenceDeleteSQL, getKey(refs.getTargetId()));
+        } catch (Exception e) {
+            if (e instanceof NoSuchItemStateException) {
+                throw (NoSuchItemStateException) e;
+            }
+            String msg = "failed to delete " + refs;
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public synchronized boolean existsReferencesTo(NodeId targetId) throws ItemStateException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        ResultSet rs = null;
+        try {
+            rs = conHelper.exec(nodeReferenceSelectSQL, getKey(targetId), false, 0);
+
+            // a reference exists if the result has at least one entry
+            return rs.next();
+        } catch (Exception e) {
+            String msg = "failed to check existence of node references: "
+                + targetId;
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            DbUtility.close(rs);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public String toString() {
+        return name;
+    }
+
+    /**
+     * Initializes the SQL strings.
+     */
+    protected void buildSQLStatements() {
+        // prepare statements
+        if (getStorageModel() == SM_BINARY_KEYS) {
+            bundleInsertSQL = "insert into " + schemaObjectPrefix + "BUNDLE (BUNDLE_DATA, NODE_ID) values (?, ?)";
+            bundleUpdateSQL = "update " + schemaObjectPrefix + "BUNDLE set BUNDLE_DATA = ? where NODE_ID = ?";
+            bundleSelectSQL = "select BUNDLE_DATA from " + schemaObjectPrefix + "BUNDLE where NODE_ID = ?";
+            bundleDeleteSQL = "delete from " + schemaObjectPrefix + "BUNDLE where NODE_ID = ?";
+
+            nodeReferenceInsertSQL = "insert into " + schemaObjectPrefix + "REFS (REFS_DATA, NODE_ID) values (?, ?)";
+            nodeReferenceUpdateSQL = "update " + schemaObjectPrefix + "REFS set REFS_DATA = ? where NODE_ID = ?";
+            nodeReferenceSelectSQL = "select REFS_DATA from " + schemaObjectPrefix + "REFS where NODE_ID = ?";
+            nodeReferenceDeleteSQL = "delete from " + schemaObjectPrefix + "REFS where NODE_ID = ?";
+
+            bundleSelectAllIdsSQL = "select NODE_ID from " + schemaObjectPrefix + "BUNDLE ORDER BY NODE_ID";
+            bundleSelectAllIdsFromSQL = "select NODE_ID from " + schemaObjectPrefix + "BUNDLE WHERE NODE_ID > ? ORDER BY NODE_ID";
+            bundleSelectAllBundlesSQL = "select NODE_ID, BUNDLE_DATA from " + schemaObjectPrefix + "BUNDLE ORDER BY NODE_ID";
+            bundleSelectAllBundlesFromSQL = "select NODE_ID, BUNDLE_DATA from " + schemaObjectPrefix + "BUNDLE WHERE NODE_ID > ? ORDER BY NODE_ID";
+        } else {
+            bundleInsertSQL = "insert into " + schemaObjectPrefix + "BUNDLE (BUNDLE_DATA, NODE_ID_HI, NODE_ID_LO) values (?, ?, ?)";
+            bundleUpdateSQL = "update " + schemaObjectPrefix + "BUNDLE set BUNDLE_DATA = ? where NODE_ID_HI = ? and NODE_ID_LO = ?";
+            bundleSelectSQL = "select BUNDLE_DATA from " + schemaObjectPrefix + "BUNDLE where NODE_ID_HI = ? and NODE_ID_LO = ?";
+            bundleDeleteSQL = "delete from " + schemaObjectPrefix + "BUNDLE where NODE_ID_HI = ? and NODE_ID_LO = ?";
+
+            nodeReferenceInsertSQL =
+                "insert into " + schemaObjectPrefix + "REFS"
+                + " (REFS_DATA, NODE_ID_HI, NODE_ID_LO) values (?, ?, ?)";
+            nodeReferenceUpdateSQL =
+                "update " + schemaObjectPrefix + "REFS"
+                + " set REFS_DATA = ? where NODE_ID_HI = ? and NODE_ID_LO = ?";
+            nodeReferenceSelectSQL = "select REFS_DATA from " + schemaObjectPrefix + "REFS where NODE_ID_HI = ? and NODE_ID_LO = ?";
+            nodeReferenceDeleteSQL = "delete from " + schemaObjectPrefix + "REFS where NODE_ID_HI = ? and NODE_ID_LO = ?";
+
+            bundleSelectAllIdsSQL = "select NODE_ID_HI, NODE_ID_LO from " + schemaObjectPrefix 
+                + "BUNDLE ORDER BY NODE_ID_HI, NODE_ID_LO";
+            // need to use HI and LO parameters
+            // this is not the exact statement, but not all databases support WHERE (NODE_ID_HI, NODE_ID_LOW) >= (?, ?)
+            bundleSelectAllIdsFromSQL =
+                "select NODE_ID_HI, NODE_ID_LO from " + schemaObjectPrefix + "BUNDLE"
+                + " WHERE (NODE_ID_HI >= ?) AND (? IS NOT NULL)"
+                + " ORDER BY NODE_ID_HI, NODE_ID_LO";
+
+            bundleSelectAllBundlesSQL = "select NODE_ID_HI, NODE_ID_LO, BUNDLE_DATA from " + schemaObjectPrefix
+                    + "BUNDLE ORDER BY NODE_ID_HI, NODE_ID_LO";
+            // need to use HI and LO parameters
+            // this is not the exact statement, but not all databases support WHERE (NODE_ID_HI, NODE_ID_LOW) >= (?, ?)
+            bundleSelectAllBundlesFromSQL =
+                    "select NODE_ID_HI, NODE_ID_LO, BUNDLE_DATA from " + schemaObjectPrefix + "BUNDLE"
+                            + " WHERE (NODE_ID_HI >= ?) AND (? IS NOT NULL)"
+                            + " ORDER BY NODE_ID_HI, NODE_ID_LO";
+
+        }
+
+    }
+
+    /**
+     * Helper interface for closeable stores
+     */
+    protected static interface CloseableBLOBStore extends BLOBStore {
+        void close();
+    }
+
+    /**
+     * own implementation of the filesystem blob store that uses a different
+     * blob-id scheme.
+     */
+    protected class FSBlobStore extends FileSystemBLOBStore implements CloseableBLOBStore {
+
+        private FileSystem fs;
+
+        public FSBlobStore(FileSystem fs) {
+            super(fs);
+            this.fs = fs;
+        }
+
+        public String createId(PropertyId id, int index) {
+            return buildBlobFilePath(null, id, index).toString();
+        }
+
+        public void close() {
+            try {
+                fs.close();
+                fs = null;
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+
+    /**
+     * Implementation of a blob store that stores the data inside the database
+     */
+    protected class DbBlobStore implements CloseableBLOBStore {
+
+        protected String blobInsertSQL;
+        protected String blobUpdateSQL;
+        protected String blobSelectSQL;
+        protected String blobSelectExistSQL;
+        protected String blobDeleteSQL;
+
+        public DbBlobStore() throws SQLException {
+            blobInsertSQL = "insert into " + schemaObjectPrefix + "BINVAL (BINVAL_DATA, BINVAL_ID) values (?, ?)";
+            blobUpdateSQL = "update " + schemaObjectPrefix + "BINVAL set BINVAL_DATA = ? where BINVAL_ID = ?";
+            blobSelectSQL = "select BINVAL_DATA from " + schemaObjectPrefix + "BINVAL where BINVAL_ID = ?";
+            blobSelectExistSQL = "select 1 from " + schemaObjectPrefix + "BINVAL where BINVAL_ID = ?";
+            blobDeleteSQL = "delete from " + schemaObjectPrefix + "BINVAL where BINVAL_ID = ?";
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public String createId(PropertyId id, int index) {
+            StringBuilder buf = new StringBuilder();
+            buf.append(id.getParentId().toString());
+            buf.append('.');
+            buf.append(getNsIndex().stringToIndex(id.getName().getNamespaceURI()));
+            buf.append('.');
+            buf.append(getNameIndex().stringToIndex(id.getName().getLocalName()));
+            buf.append('.');
+            buf.append(index);
+            return buf.toString();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public InputStream get(String blobId) throws Exception {
+            ResultSet rs = null;
+            boolean close = true;
+            try {
+                rs = conHelper.exec(blobSelectSQL, new Object[]{blobId}, false, 0);
+                if (!rs.next()) {
+                    throw new Exception("no such BLOB: " + blobId);
+                }
+
+                InputStream in = rs.getBinaryStream(1);
+                if (in == null) {
+                    // some databases treat zero-length values as NULL;
+                    // return empty InputStream in such a case
+                    return new ByteArrayInputStream(new byte[0]);
+                }
+
+                // return an InputStream wrapper in order to close the ResultSet when the stream is closed
+                close = false;
+                final ResultSet rs2 = rs;
+                return new FilterInputStream(in) {
+
+                    public void close() throws IOException {
+                        try {
+                            in.close();
+                        } finally {
+                            // now it's safe to close ResultSet
+                            DbUtility.close(rs2);
+                        }
+                    }
+                };
+            } finally {
+                if (close) {
+                    DbUtility.close(rs);
+                }
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public synchronized void put(String blobId, InputStream in, long size)
+                throws Exception {
+            ResultSet rs = null;
+            boolean exists;
+            try {
+                rs = conHelper.exec(blobSelectExistSQL, new Object[]{blobId}, false, 0);
+                // a BLOB exists if the result has at least one entry
+                exists = rs.next();
+            } finally {
+                DbUtility.close(rs);
+            }
+            String sql = (exists) ? blobUpdateSQL : blobInsertSQL;
+            Object[] params = new Object[]{new StreamWrapper(in, size), blobId};
+            conHelper.exec(sql, params);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public synchronized boolean remove(String blobId) throws Exception {
+            return conHelper.update(blobDeleteSQL, new Object[]{blobId}) == 1;
+        }
+
+        public void close() {
+            // closing the database resources of this blobstore is left to the
+            // owning BundleDbPersistenceManager
+        }
+    }
+}
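
The DbBlobStore above keys each binary value by a composite id that createId assembles from the parent node id plus the namespace, name and value indexes; that same id is then used as BINVAL_ID by put, get and remove. A rough sketch of the resulting key, with the node id and index values assumed purely for illustration:

// Illustration only: the id format produced by DbBlobStore#createId.
// The node id and the numeric indexes are assumed values; real ones come
// from the persistence manager's getNsIndex()/getNameIndex().
public class BlobIdExample {
    public static void main(String[] args) {
        String parentNodeId = "deadbeef-cafe-babe-cafe-0123456789ab";
        int nsIndex = 3;     // index of the property's namespace URI
        int nameIndex = 17;  // index of the property's local name
        int valueIndex = 0;  // position of the value within a multi-valued property
        String blobId = parentNodeId + '.' + nsIndex + '.' + nameIndex + '.' + valueIndex;
        System.out.println(blobId); // deadbeef-cafe-babe-cafe-0123456789ab.3.17.0
    }
}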

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/db2.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.DB2FileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.DatabaseDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.DB2FileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.DbPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.DB2FileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.DbPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.DatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/derby.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.DerbyFileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.store.DerbyDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.DerbyFileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.DerbyPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.DerbyFileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.DerbyPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.DatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/mssql.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.MSSqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.DatabaseDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.MSSqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.MSSqlPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.MSSqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.MSSqlPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.MSSqlDatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/mysql.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.MysqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.DatabaseDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.MysqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.MySqlPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.MysqlFileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.MySqlPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.DatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/oracle.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.OracleFileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.DatabaseDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.OracleFileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.OraclePersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.OracleFileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.OraclePersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.OracleDatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>

+ 40 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/configs/postgresql.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE Repository PUBLIC "-//The Apache Software Foundation//DTD Jackrabbit 1.5//EN" "http://jackrabbit.apache.org/dtd/repository-1.5.dtd">
+<Repository>
+  <FileSystem class="com.bstek.urule.console.repository.database.system.PostgreSQLFileSystem">
+      <param name="schemaObjectPrefix" value="repo_"/>
+  </FileSystem>
+  <Security appName="Jackrabbit">
+    <AccessManager class="org.apache.jackrabbit.core.security.simple.SimpleAccessManager"></AccessManager>
+    <LoginModule class="org.apache.jackrabbit.core.security.simple.SimpleLoginModule">
+      <param name="anonymousId" value="anonymous" />
+      <param name="adminId" value="admin" />
+    </LoginModule>
+  </Security>
+  <DataStore class="com.bstek.urule.console.repository.database.DatabaseDataStore">
+      <param name="schemaObjectPrefix" value="repo_ds_"/>
+  </DataStore>
+  <Workspaces rootPath="${rep.home}/workspaces" defaultWorkspace="default" />
+  <Workspace name="default">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.PostgreSQLFileSystem">
+      <param name="schemaObjectPrefix" value="repo_${wsp.name}_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.PostgreSQLPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_pm_${wsp.name}_"/>
+    </PersistenceManager>
+  </Workspace>
+  <Versioning rootPath="${rep.home}/version">
+    <FileSystem class="com.bstek.urule.console.repository.database.system.PostgreSQLFileSystem">
+      <param name="schemaObjectPrefix" value="repo_fsver_"/>
+    </FileSystem>
+    <PersistenceManager class="com.bstek.urule.console.repository.database.manager.PostgreSQLPersistenceManager">
+      <param name="schemaObjectPrefix" value="repo_ver_"/>
+    </PersistenceManager>
+  </Versioning>
+  
+  <Cluster syncDelay="5000">
+    <Journal class="com.bstek.urule.console.repository.database.journal.DatabaseJournal">
+      <param name="schemaObjectPrefix" value="journal_"/>
+    </Journal>
+  </Cluster>
+</Repository>
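
The six repository configurations above differ only in the FileSystem, PersistenceManager and Journal implementations they reference; the schemaObjectPrefix parameters, with ${wsp.name} expanded per workspace, decide the physical table names. A small illustration of how the prefixes combine for the default workspace, derived from the SQL built in DbPersistenceManager and DatabaseJournal rather than read from a live schema (the upper-casing mirrors the journal's setSchemaObjectPrefix; the exact case on a given database is an assumption):

// Illustration only: schemaObjectPrefix values from the configs mapped to table names.
public class PrefixExpansionExample {
    public static void main(String[] args) {
        String workspace = "default";
        String pmPrefix = "repo_pm_" + workspace + "_";     // <PersistenceManager> param
        String journalPrefix = "journal_".toUpperCase();    // <Cluster>/<Journal> param
        System.out.println(pmPrefix + "BUNDLE");                // repo_pm_default_BUNDLE
        System.out.println(pmPrefix + "REFS");                  // repo_pm_default_REFS
        System.out.println(journalPrefix + "JOURNAL");          // JOURNAL_JOURNAL
        System.out.println(journalPrefix + "GLOBAL_REVISION");  // JOURNAL_GLOBAL_REVISION
        System.out.println(journalPrefix + "LOCAL_REVISIONS");  // JOURNAL_LOCAL_REVISIONS
    }
}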

+ 828 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/DatabaseJournal.java

@@ -0,0 +1,828 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.journal;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Calendar;
+
+import javax.sql.DataSource;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.journal.AbstractJournal;
+import org.apache.jackrabbit.core.journal.AppendRecord;
+import org.apache.jackrabbit.core.journal.FileRevision;
+import org.apache.jackrabbit.core.journal.InstanceRevision;
+import org.apache.jackrabbit.core.journal.JournalException;
+import org.apache.jackrabbit.core.journal.RecordIterator;
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.core.util.db.ConnectionFactory;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.DatabaseAware;
+import org.apache.jackrabbit.core.util.db.DbUtility;
+import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.bstek.urule.console.repository.RepositoryBuilder;
+
+/**
+ * @author Jacky.gao
+ * @since 2017年12月7日
+ */
+public class DatabaseJournal extends AbstractJournal implements DatabaseAware{
+
+    /**
+     * Default journal table name, used to check schema completeness.
+     */
+    private static final String DEFAULT_JOURNAL_TABLE = "JOURNAL";
+
+    /**
+     * Local revisions table name, used to check schema completeness.
+     */
+    private static final String LOCAL_REVISIONS_TABLE = "LOCAL_REVISIONS";
+
+    /**
+     * Logger.
+     */
+    static Logger log = LoggerFactory.getLogger(DatabaseJournal.class);
+
+    /**
+     * Driver name, bean property.
+     */
+    private String driver;
+
+    /**
+     * Connection URL, bean property.
+     */
+    private String url;
+
+    /**
+     * Database type, bean property.
+     */
+    private String databaseType;
+
+    /**
+     * User name, bean property.
+     */
+    private String user;
+
+    /**
+     * Password, bean property.
+     */
+    private String password;
+
+    /**
+     * DataSource logical name, bean property.
+     */
+    private String dataSourceName;
+
+    /**
+     * The connection helper
+     */
+    ConnectionHelper conHelper;
+
+    /**
+     * Current lock/batch nesting level (see startBatch/endBatch).
+     */
+    private int lockLevel;
+
+    /**
+     * Locked revision.
+     */
+    private long lockedRevision;
+
+    /**
+     * Whether the revision table janitor thread is enabled.
+     */
+    private boolean janitorEnabled = false;
+
+    /**
+     * The sleep time of the revision table janitor in seconds, 1 day default.
+     */
+    int janitorSleep = 60 * 60 * 24;
+
+    /**
+     * Indicates when the next run of the janitor is scheduled.
+     * The first run is scheduled by default at 03:00 hours.
+     */
+    Calendar janitorNextRun = Calendar.getInstance();
+
+    {
+        if (janitorNextRun.get(Calendar.HOUR_OF_DAY) >= 3) {
+            janitorNextRun.add(Calendar.DAY_OF_MONTH, 1);
+        }
+        janitorNextRun.set(Calendar.HOUR_OF_DAY, 3);
+        janitorNextRun.set(Calendar.MINUTE, 0);
+        janitorNextRun.set(Calendar.SECOND, 0);
+        janitorNextRun.set(Calendar.MILLISECOND, 0);
+    }
+
+    private Thread janitorThread;
+
+    /**
+     * Whether the schema check must be done during initialization.
+     */
+    private boolean schemaCheckEnabled = true;
+
+    /**
+     * The instance that manages the local revision.
+     */
+    private DatabaseRevision databaseRevision;
+
+    /**
+     * SQL statement returning all revisions within a range.
+     */
+    protected String selectRevisionsStmtSQL;
+
+    /**
+     * SQL statement updating the global revision.
+     */
+    protected String updateGlobalStmtSQL;
+
+    /**
+     * SQL statement returning the global revision.
+     */
+    protected String selectGlobalStmtSQL;
+
+    /**
+     * SQL statement appending a new record.
+     */
+    protected String insertRevisionStmtSQL;
+
+    /**
+     * SQL statement returning the minimum of the local revisions.
+     */
+    protected String selectMinLocalRevisionStmtSQL;
+
+    /**
+     * SQL statement removing a set of revisions from the journal table.
+     */
+    protected String cleanRevisionStmtSQL;
+
+    /**
+     * SQL statement returning the local revision of this cluster node.
+     */
+    protected String getLocalRevisionStmtSQL;
+
+    /**
+     * SQL statement for inserting the local revision of this cluster node.
+     */
+    protected String insertLocalRevisionStmtSQL;
+
+    /**
+     * SQL statement for updating the local revision of this cluster node.
+     */
+    protected String updateLocalRevisionStmtSQL;
+
+    /**
+     * Schema object prefix, bean property.
+     */
+    protected String schemaObjectPrefix;
+    // The repository's ConnectionFactory is no longer used here; the connection
+    // comes from the DataSource exposed by RepositoryBuilder (see getDataSource()).
+    // private ConnectionFactory connectionFactory;
+
+    public DatabaseJournal() {
+        databaseType = "default";
+        schemaObjectPrefix = "";
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void setConnectionFactory(ConnectionFactory connectionFactory) {
+        // not used: the DataSource is obtained from RepositoryBuilder instead (see getDataSource())
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(String id, NamespaceResolver resolver)
+            throws JournalException {
+
+        super.init(id, resolver);
+
+        init();
+
+        try {
+            conHelper = createConnectionHelper(getDataSource());
+
+            // make sure schemaObjectPrefix consists of legal name characters only
+            schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
+
+            // check if schema objects exist and create them if necessary
+            if (isSchemaCheckEnabled()) {
+                createCheckSchemaOperation().run();
+            }
+
+            // Make sure that the LOCAL_REVISIONS table exists (see JCR-1087)
+            if (isSchemaCheckEnabled()) {
+                checkLocalRevisionSchema();
+            }
+
+            buildSQLStatements();
+            initInstanceRevisionAndJanitor();
+        } catch (Exception e) {
+            String msg = "Unable to create connection.";
+            throw new JournalException(msg, e);
+        }
+        log.info("DatabaseJournal initialized.");
+    }
+
+    private DataSource getDataSource() throws Exception {
+        /*if (getDataSourceName() == null || "".equals(getDataSourceName())) {
+            return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword());
+        } else {
+            return connectionFactory.getDataSource(dataSourceName);
+        }*/
+        return RepositoryBuilder.datasource;
+    }
+
+    /**
+     * This method is called from the {@link #init(String, NamespaceResolver)} method of this class and
+     * returns a {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field.
+     * Subclasses may override it to return a specialized connection helper.
+     *
+     * @param dataSrc the {@link DataSource} of this persistence manager
+     * @return a {@link ConnectionHelper}
+     * @throws Exception on error
+     */
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        return new ConnectionHelper(dataSrc, false);
+    }
+
+    /**
+     * This method is called from {@link #init(String, NamespaceResolver)} after the
+     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
+     * Subclasses can override this implementation to return a customized one.
+     *
+     * @return a new {@link CheckSchemaOperation} instance
+     */
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        InputStream in = org.apache.jackrabbit.core.journal.DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
+        return new CheckSchemaOperation(conHelper, in, schemaObjectPrefix + DEFAULT_JOURNAL_TABLE).addVariableReplacement(
+            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
+    }
+
+    /**
+     * Completes initialization of this database journal. Unlike the stock
+     * Jackrabbit implementation, it does not require the <code>driver</code> and
+     * <code>url</code> bean properties: the database type is resolved from
+     * {@link RepositoryBuilder} and the connection is obtained later from the
+     * shared {@link DataSource}. May be overridden by subclasses that determine
+     * the database type differently.
+     *
+     * @throws JournalException if initialization fails
+     */
+    protected void init() throws JournalException {
+        databaseType = RepositoryBuilder.databaseType;
+    }
+
+    /**
+     * Initialize the instance revision manager and the janitor thread.
+     *
+     * @throws JournalException on error
+     */
+    protected void initInstanceRevisionAndJanitor() throws Exception {
+        databaseRevision = new DatabaseRevision();
+
+        // Get the local file revision from disk (upgrade; see JCR-1087)
+        long localFileRevision = 0L;
+        if (getRevision() != null) {
+            InstanceRevision currentFileRevision = new FileRevision(new File(getRevision()), true);
+            localFileRevision = currentFileRevision.get();
+            currentFileRevision.close();
+        }
+
+        // Now write the localFileRevision (or 0 if it does not exist) to the LOCAL_REVISIONS
+        // table, but only if the LOCAL_REVISIONS table has no entry yet for this cluster node
+        long localRevision = databaseRevision.init(localFileRevision);
+        log.info("Initialized local revision to " + localRevision);
+
+        // Start the clean-up thread if necessary.
+        if (janitorEnabled) {
+            janitorThread = new Thread(new RevisionTableJanitor(), "Jackrabbit-ClusterRevisionJanitor");
+            janitorThread.setDaemon(true);
+            janitorThread.start();
+            log.info("Cluster revision janitor thread started; first run scheduled at " + janitorNextRun.getTime());
+        } else {
+            log.info("Cluster revision janitor thread not started");
+        }
+    }
+
+    /* (non-Javadoc)
+     * @see org.apache.jackrabbit.core.journal.Journal#getInstanceRevision()
+     */
+    public InstanceRevision getInstanceRevision() throws JournalException {
+        return databaseRevision;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public RecordIterator getRecords(long startRevision) throws JournalException {
+        try {
+            return new DatabaseRecordIterator(conHelper.exec(selectRevisionsStmtSQL, new Object[]{new Long(
+                    startRevision)}, false, 0), getResolver(), getNamePathResolver());
+        } catch (SQLException e) {
+            throw new JournalException("Unable to return record iterator.", e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public RecordIterator getRecords() throws JournalException {
+        try {
+            return new DatabaseRecordIterator(conHelper.exec(selectRevisionsStmtSQL, new Object[]{new Long(
+                    Long.MIN_VALUE)}, false, 0), getResolver(), getNamePathResolver());
+        } catch (SQLException e) {
+            throw new JournalException("Unable to return record iterator.", e);
+        }
+    }
+
+    /**
+     * Synchronize contents from journal. May be overridden by subclasses.
+     * Do the initial sync in batch mode, since some databases (e.g. PostgreSQL),
+     * when not in transactional mode, load all results into memory, which can
+     * cause out-of-memory errors. See JCR-2832.
+     *
+     * @param startRevision start point (exclusive)
+     * @param startup indicates if the cluster node is syncing on startup 
+     *        or does a normal sync.
+     * @throws JournalException if an error occurs
+     */
+    @Override
+    protected void doSync(long startRevision, boolean startup) throws JournalException {
+        if (!startup) {
+            // if the cluster node is not starting do a normal sync
+            doSync(startRevision);
+        } else {
+            try {
+                startBatch();
+                try {
+                    doSync(startRevision);
+                } finally {
+                    endBatch(true);
+                }
+            } catch (SQLException e) {
+                throw new JournalException("Couldn't sync the cluster node", e);
+            }
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>
+     * This journal is locked by incrementing the current value in the table
+     * named <code>GLOBAL_REVISION</code>, which effectively write-locks this
+     * table. The updated value is then saved away and remembered in the
+     * appended record, because a save may entail multiple appends (JCR-884).
+     */
+    protected void doLock() throws JournalException {
+        ResultSet rs = null;
+        boolean succeeded = false;
+
+        try {
+            startBatch();
+        } catch (SQLException e) {
+            throw new JournalException("Unable to set autocommit to false.", e);
+        }
+
+        try {
+            conHelper.exec(updateGlobalStmtSQL);
+            rs = conHelper.exec(selectGlobalStmtSQL, null, false, 0);
+            if (!rs.next()) {
+                 throw new JournalException("No revision available.");
+            }
+            lockedRevision = rs.getLong(1);
+            succeeded = true;
+        } catch (SQLException e) {
+            throw new JournalException("Unable to lock global revision table.", e);
+        } finally {
+            DbUtility.close(rs);
+            if (!succeeded) {
+                doUnlock(false);
+            }
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected void doUnlock(boolean successful) {
+        endBatch(successful);
+    }
+
+    private void startBatch() throws SQLException {
+        if (lockLevel++ == 0) {
+            conHelper.startBatch();
+        }
+    }
+
+    private void endBatch(boolean successful) {
+        if (--lockLevel == 0) {
+            try {
+                conHelper.endBatch(successful);
+            } catch (SQLException e) {
+                log.error("failed to end batch", e);
+            }
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>
+     * Save away the locked revision inside the newly appended record.
+     */
+    protected void appending(AppendRecord record) {
+        record.setRevision(lockedRevision);
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>
+     * We have already saved away the revision for this record.
+     */
+    protected void append(AppendRecord record, InputStream in, int length)
+            throws JournalException {
+
+        try {
+            conHelper.exec(insertRevisionStmtSQL, record.getRevision(), getId(), record.getProducerId(),
+                new StreamWrapper(in, length));
+
+        } catch (SQLException e) {
+            String msg = "Unable to append revision " + lockedRevision + ".";
+            throw new JournalException(msg, e);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void close() {
+        if (janitorThread != null) {
+            janitorThread.interrupt();
+        }
+    }
+
+    /**
+     * Checks if the local revision schema objects exist and creates them if they
+     * don't exist yet.
+     *
+     * @throws Exception if an error occurs
+     */
+    private void checkLocalRevisionSchema() throws Exception {
+        InputStream localRevisionDDLStream = null;
+        InputStream in = org.apache.jackrabbit.core.journal.DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
+        try {
+            BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+            String sql = reader.readLine();
+            while (sql != null) {
+                // Skip comments and empty lines, and select only the statement to create the LOCAL_REVISIONS
+                // table.
+                if (!sql.startsWith("#") && sql.length() > 0 && sql.indexOf(LOCAL_REVISIONS_TABLE) != -1) {
+                    localRevisionDDLStream = new ByteArrayInputStream(sql.getBytes());
+                    break;
+                }
+                // read next sql stmt
+                sql = reader.readLine();
+            }
+        } finally {
+            IOUtils.closeQuietly(in);
+        }
+        // Run the schema check for the single table
+        new CheckSchemaOperation(conHelper, localRevisionDDLStream, schemaObjectPrefix
+                + LOCAL_REVISIONS_TABLE).addVariableReplacement(
+            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix).run();
+    }
+
+    /**
+     * Builds the SQL statements. May be overridden by subclasses to allow
+     * different table and/or column names.
+     */
+    protected void buildSQLStatements() {
+        selectRevisionsStmtSQL =
+            "select REVISION_ID, JOURNAL_ID, PRODUCER_ID, REVISION_DATA from "
+            + schemaObjectPrefix + "JOURNAL where REVISION_ID > ? order by REVISION_ID";
+        updateGlobalStmtSQL =
+            "update " + schemaObjectPrefix + "GLOBAL_REVISION"
+            + " set REVISION_ID = REVISION_ID + 1";
+        selectGlobalStmtSQL =
+            "select REVISION_ID from "
+            + schemaObjectPrefix + "GLOBAL_REVISION";
+        insertRevisionStmtSQL =
+            "insert into " + schemaObjectPrefix + "JOURNAL"
+            + " (REVISION_ID, JOURNAL_ID, PRODUCER_ID, REVISION_DATA) "
+            + "values (?,?,?,?)";
+        selectMinLocalRevisionStmtSQL =
+            "select MIN(REVISION_ID) from " + schemaObjectPrefix + "LOCAL_REVISIONS";
+        cleanRevisionStmtSQL =
+            "delete from " + schemaObjectPrefix + "JOURNAL " + "where REVISION_ID < ?";
+        getLocalRevisionStmtSQL =
+            "select REVISION_ID from " + schemaObjectPrefix + "LOCAL_REVISIONS "
+            + "where JOURNAL_ID = ?";
+        insertLocalRevisionStmtSQL =
+            "insert into " + schemaObjectPrefix + "LOCAL_REVISIONS "
+            + "(REVISION_ID, JOURNAL_ID) values (?,?)";
+        updateLocalRevisionStmtSQL =
+            "update " + schemaObjectPrefix + "LOCAL_REVISIONS "
+            + "set REVISION_ID = ? where JOURNAL_ID = ?";
+    }
+
+    /**
+     * Bean getters
+     */
+    public String getDriver() {
+        return driver;
+    }
+
+    public String getUrl() {
+        return url;
+    }
+
+    /**
+     * Get the database type.
+     *
+     * @return the database type
+     */
+    public String getDatabaseType() {
+        return databaseType;
+    }
+
+    /**
+     * Get the database type.
+     * @deprecated
+     * This method is deprecated; {@link #getDatabaseType} should be used instead.
+     *
+     * @return the database type
+     */
+    public String getSchema() {
+        return databaseType;
+    }
+
+    public String getSchemaObjectPrefix() {
+        return schemaObjectPrefix;
+    }
+
+    public String getUser() {
+        return user;
+    }
+
+    public String getPassword() {
+        return password;
+    }
+
+    public boolean getJanitorEnabled() {
+        return janitorEnabled;
+    }
+
+    public int getJanitorSleep() {
+        return janitorSleep;
+    }
+
+    public int getJanitorFirstRunHourOfDay() {
+        return janitorNextRun.get(Calendar.HOUR_OF_DAY);
+    }
+
+    /**
+     * Bean setters
+     */
+    public void setDriver(String driver) {
+        this.driver = driver;
+    }
+
+    public void setUrl(String url) {
+        this.url = url;
+    }
+
+    /**
+     * Set the database type.
+     *
+     * @param databaseType the database type
+     */
+    public void setDatabaseType(String databaseType) {
+        this.databaseType = databaseType;
+    }
+
+    /**
+     * Set the database type.
+     * @deprecated
+     * This method is deprecated; {@link #setDatabaseType(String)} should be used instead.
+     *
+     * @param databaseType the database type
+     */
+    public void setSchema(String databaseType) {
+        this.databaseType = databaseType;
+    }
+
+    public void setSchemaObjectPrefix(String schemaObjectPrefix) {
+        this.schemaObjectPrefix = schemaObjectPrefix.toUpperCase();
+    }
+
+    public void setUser(String user) {
+        this.user = user;
+    }
+
+    public void setPassword(String password) {
+        this.password = password;
+    }
+
+    public void setJanitorEnabled(boolean enabled) {
+        this.janitorEnabled = enabled;
+    }
+
+    public void setJanitorSleep(int sleep) {
+        this.janitorSleep = sleep;
+    }
+
+    public void setJanitorFirstRunHourOfDay(int hourOfDay) {
+        janitorNextRun = Calendar.getInstance();
+        if (janitorNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) {
+            janitorNextRun.add(Calendar.DAY_OF_MONTH, 1);
+        }
+        janitorNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay);
+        janitorNextRun.set(Calendar.MINUTE, 0);
+        janitorNextRun.set(Calendar.SECOND, 0);
+        janitorNextRun.set(Calendar.MILLISECOND, 0);
+    }
+
+    public String getDataSourceName() {
+        return dataSourceName;
+    }
+
+    public void setDataSourceName(String dataSourceName) {
+        this.dataSourceName = dataSourceName;
+    }
+
+    /**
+     * @return whether the schema check is enabled
+     */
+    public final boolean isSchemaCheckEnabled() {
+        return schemaCheckEnabled;
+    }
+
+    /**
+     * @param enabled set whether the schema check is enabled
+     */
+    public final void setSchemaCheckEnabled(boolean enabled) {
+        schemaCheckEnabled = enabled;
+    }
+
+    /**
+     * This class manages the local revision of the cluster node. It
+     * persists the local revision in the LOCAL_REVISIONS table in the
+     * clustering database.
+     */
+    public class DatabaseRevision implements InstanceRevision {
+
+        /**
+         * The cached local revision of this cluster node.
+         */
+        private long localRevision;
+
+        /**
+         * Indicates whether the init method has been called.
+         */
+        private boolean initialized = false;
+
+        /**
+         * Checks whether there's a local revision value in the database for this
+         * cluster node. If not, it writes the given default revision to the database.
+         *
+         * @param revision the default value for the local revision counter
+         * @return the local revision
+         * @throws JournalException on error
+         */
+        protected synchronized long init(long revision) throws JournalException {
+            ResultSet rs = null;
+            try {
+                // Check whether there is an entry in the database.
+                rs = conHelper.exec(getLocalRevisionStmtSQL, new Object[]{getId()}, false, 0);
+                boolean exists = rs.next();
+                if (exists) {
+                    revision = rs.getLong(1);
+                }
+
+                // Insert the given revision in the database
+                if (!exists) {
+                    conHelper.exec(insertLocalRevisionStmtSQL, revision, getId());
+                }
+
+                // Set the cached local revision and return
+                localRevision = revision;
+                initialized = true;
+                return revision;
+
+            } catch (SQLException e) {
+                log.warn("Failed to initialize local revision.", e);
+                throw new JournalException("Failed to initialize local revision", e);
+            } finally {
+                DbUtility.close(rs);
+            }
+        }
+
+        public synchronized long get() {
+            if (!initialized) {
+                throw new IllegalStateException("instance has not yet been initialized");
+            }
+            return localRevision;
+        }
+
+        public synchronized void set(long localRevision) throws JournalException {
+
+            if (!initialized) {
+                throw new IllegalStateException("instance has not yet been initialized");
+            }
+
+            // Update the cached value and the table with local revisions.
+            try {
+                conHelper.exec(updateLocalRevisionStmtSQL, localRevision, getId());
+                this.localRevision = localRevision;
+            } catch (SQLException e) {
+                log.warn("Failed to update local revision.", e);
+                throw new JournalException("Failed to update local revision.", e);
+            }
+        }
+
+        public void close() {
+            // nothing to do
+        }
+    }
+
+    /**
+     * Class for maintaining the revision table. This is only useful if all
+     * JR information except the search index is in the database (i.e., node types
+     * etc). In that case, revision data can safely be thrown away from the JOURNAL table.
+     */
+    public class RevisionTableJanitor implements Runnable {
+
+        /**
+         * {@inheritDoc}
+         */
+        public void run() {
+            while (!Thread.currentThread().isInterrupted()) {
+                try {
+                    log.info("Next clean-up run scheduled at " + janitorNextRun.getTime());
+                    long sleepTime = janitorNextRun.getTimeInMillis() - System.currentTimeMillis();
+                    if (sleepTime > 0) {
+                        Thread.sleep(sleepTime);
+                    }
+                    cleanUpOldRevisions();
+                    janitorNextRun.add(Calendar.SECOND, janitorSleep);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                }
+            }
+            log.info("Interrupted: stopping clean-up task.");
+        }
+
+        /**
+         * Cleans old revisions from the clustering table.
+         */
+        protected void cleanUpOldRevisions() {
+            ResultSet rs = null;
+            try {
+                long minRevision = 0;
+                rs = conHelper.exec(selectMinLocalRevisionStmtSQL, null, false, 0);
+                boolean cleanUp = rs.next();
+                if (cleanUp) {
+                    minRevision = rs.getLong(1);
+                }
+
+                // Clean up if necessary:
+                if (cleanUp) {
+                    conHelper.exec(cleanRevisionStmtSQL, minRevision);
+                    log.info("Cleaned old revisions up to revision " + minRevision + ".");
+                }
+
+            } catch (Exception e) {
+                log.warn("Failed to clean up old revisions.", e);
+            } finally {
+                DbUtility.close(rs);
+            }
+        }
+    }
+}
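
Unlike the stock Jackrabbit journal, the class above ignores the driver, url, user and password bean properties when opening connections: getDataSource() returns RepositoryBuilder.datasource and init() reads RepositoryBuilder.databaseType. A minimal wiring sketch, assuming those RepositoryBuilder members are writable statics populated from a Spring-managed DataSource before the repository is built; the field visibility and the bootstrap flow are assumptions, not the project's actual startup code:

// Sketch under assumptions: RepositoryBuilder exposes writable static fields named
// datasource and databaseType (only the reads are visible in this commit).
import javax.sql.DataSource;

import com.bstek.urule.console.repository.RepositoryBuilder;

public class SpringDataSourceWiringSketch {
    public void wire(DataSource springConfiguredDataSource) {
        RepositoryBuilder.datasource = springConfiguredDataSource; // assumed assignable
        RepositoryBuilder.databaseType = "mysql";                  // must match the chosen config file
    }
}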

+ 167 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/DatabaseRecordIterator.java

@@ -0,0 +1,167 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.journal;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.NoSuchElementException;
+
+import org.apache.jackrabbit.core.journal.JournalException;
+import org.apache.jackrabbit.core.journal.ReadRecord;
+import org.apache.jackrabbit.core.journal.Record;
+import org.apache.jackrabbit.core.journal.RecordIterator;
+import org.apache.jackrabbit.spi.commons.conversion.NamePathResolver;
+import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * @author Jacky.gao
+ * @since 2017年12月7日
+ */
+public class DatabaseRecordIterator implements RecordIterator {
+
+    /**
+     * Logger.
+     */
+    private static Logger log = LoggerFactory.getLogger(DatabaseRecordIterator.class);
+
+    /**
+     * Underlying result set.
+     */
+    private final ResultSet rs;
+
+    /**
+     * Namespace resolver.
+     */
+    private final NamespaceResolver resolver;
+
+    /**
+     * Name and Path resolver.
+     */
+    private final NamePathResolver npResolver;
+
+    /**
+     * Current record.
+     */
+    private ReadRecord record;
+
+    /**
+     * Last record returned.
+     */
+    private ReadRecord lastRecord;
+
+    /**
+     * Flag indicating whether EOF was reached.
+     */
+    private boolean isEOF;
+
+    /**
+     * Create a new instance of this class.
+     */
+    public DatabaseRecordIterator(ResultSet rs, NamespaceResolver resolver, NamePathResolver npResolver) {
+        this.rs = rs;
+        this.resolver = resolver;
+        this.npResolver = npResolver;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public boolean hasNext() {
+        try {
+            if (!isEOF && record == null) {
+                fetchRecord();
+            }
+            return !isEOF;
+        } catch (SQLException e) {
+            String msg = "Error while moving to next record.";
+            log.error(msg, e);
+            return false;
+        }
+    }
+
+    /**
+     * Return the next record. If there are no more records, throws
+     * a <code>NoSuchElementException</code>. If an error occurs,
+     * throws a <code>JournalException</code>.
+     *
+     * @return next record
+     * @throws java.util.NoSuchElementException if there are no more records
+     * @throws JournalException if another error occurs
+     */
+    public Record nextRecord() throws NoSuchElementException, JournalException {
+        if (!hasNext()) {
+            String msg = "No current record.";
+            throw new NoSuchElementException(msg);
+        }
+        close(lastRecord);
+        lastRecord = record;
+        record = null;
+
+        return lastRecord;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void close() {
+        if (lastRecord != null) {
+            close(lastRecord);
+            lastRecord = null;
+        }
+        try {
+            rs.close();
+        } catch (SQLException e) {
+            String msg = "Error while closing result set: " + e.getMessage();
+            log.warn(msg);
+        }
+    }
+
+    /**
+     * Fetch the next record.
+     */
+    private void fetchRecord() throws SQLException {
+        if (rs.next()) {
+            long revision = rs.getLong(1);
+            String journalId = rs.getString(2);
+            String producerId = rs.getString(3);
+            DataInputStream dataIn = new DataInputStream(rs.getBinaryStream(4));
+            record = new ReadRecord(journalId, producerId, revision, dataIn, 0, resolver, npResolver);
+        } else {
+            isEOF = true;
+        }
+    }
+
+    /**
+     * Close a record.
+     *
+     * @param record record
+     */
+    private static void close(ReadRecord record) {
+        if (record != null) {
+            try {
+                record.close();
+            } catch (IOException e) {
+                String msg = "Error while closing record.";
+                log.warn(msg, e);
+            }
+        }
+    }
+
+}
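
A short usage sketch of how the iterator is typically drained once DatabaseJournal#getRecords returns it; the record processing is a placeholder:

// Sketch only: replays journal records newer than the last revision seen locally.
import org.apache.jackrabbit.core.journal.JournalException;
import org.apache.jackrabbit.core.journal.Record;
import org.apache.jackrabbit.core.journal.RecordIterator;

import com.bstek.urule.console.repository.database.journal.DatabaseJournal;

public class RecordIteratorUsage {
    public void replay(DatabaseJournal journal, long lastSeenRevision) throws JournalException {
        RecordIterator records = journal.getRecords(lastSeenRevision);
        try {
            while (records.hasNext()) {
                Record record = records.nextRecord();
                // process record.getRevision(), record.getProducerId(), record contents ...
            }
        } finally {
            records.close(); // also closes the last record and the underlying ResultSet
        }
    }
}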

+ 66 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/MSSqlDatabaseJournal.java

@@ -0,0 +1,66 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.journal;
+
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+
+/**
+ * @author Jacky.gao
+ * @since 2017年12月7日
+ */
+public class MSSqlDatabaseJournal extends DatabaseJournal {
+
+    /** the MS SQL table space to use */
+    protected String tableSpace = "";
+
+    /**
+     * Initialize this instance with the default schema and
+     * driver values.
+     */
+    public MSSqlDatabaseJournal() {
+        setDriver("com.microsoft.sqlserver.jdbc.SQLServerDriver");
+        setDatabaseType("mssql");
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        return super.createCheckSchemaOperation().addVariableReplacement(
+            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    }
+
+    /**
+     * Returns the configured MS SQL table space.
+     * @return the configured MS SQL table space.
+     */
+    public String getTableSpace() {
+        return tableSpace;
+    }
+
+    /**
+     * Sets the MS SQL table space.
+     * @param tableSpace the MS SQL table space.
+     */
+    public void setTableSpace(String tableSpace) {
+        if (tableSpace != null && tableSpace.length() > 0) {
+            this.tableSpace = "on " + tableSpace.trim();
+        } else {
+            this.tableSpace = "";
+        }
+    }
+}
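
The setter above normalises a bare table-space name into the clause the DDL expects, so configuration supplies only the name itself. A quick illustration (the value PRIMARY is an assumed example):

// Sketch only: effect of the tableSpace bean property on the generated DDL clause.
import com.bstek.urule.console.repository.database.journal.MSSqlDatabaseJournal;

public class MSSqlTableSpaceExample {
    public static void main(String[] args) {
        MSSqlDatabaseJournal journal = new MSSqlDatabaseJournal();
        journal.setTableSpace("PRIMARY");
        // the value below replaces CheckSchemaOperation.TABLE_SPACE_VARIABLE in the mssql DDL
        System.out.println(journal.getTableSpace()); // prints: on PRIMARY
    }
}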

+ 131 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/journal/OracleDatabaseJournal.java

@@ -0,0 +1,131 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.journal;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
+
+/**
+ * @author Jacky.gao
+ * @since 2017年12月7日
+ */
+public class OracleDatabaseJournal extends DatabaseJournal {
+	   /**
+    /**
+     * are not specified.
+     */
+    protected static final String DEFAULT_TABLESPACE_CLAUSE = "";
+
+    /**
+     * Name of the replacement variable in the DDL for {@link #tablespace}.
+     */
+    protected static final String TABLESPACE_VARIABLE = "${tablespace}";
+
+    /**
+     * Name of the replacement variable in the DDL for {@link #indexTablespace}.
+     */
+    protected static final String INDEX_TABLESPACE_VARIABLE = "${indexTablespace}";
+
+    /** The Oracle tablespace to use for tables */
+    protected String tablespace;
+
+    /** The Oracle tablespace to use for indexes */
+    protected String indexTablespace;
+
+    public OracleDatabaseJournal() {
+        setDatabaseType("oracle");
+        setDriver("oracle.jdbc.OracleDriver");
+        setSchemaObjectPrefix("");
+        tablespace = DEFAULT_TABLESPACE_CLAUSE;
+        indexTablespace = DEFAULT_TABLESPACE_CLAUSE;
+    }
+
+    /**
+     * Returns the configured Oracle tablespace for tables.
+     * @return the configured Oracle tablespace for tables.
+     */
+    public String getTablespace() {
+        return tablespace;
+    }
+
+    /**
+     * Sets the Oracle tablespace for tables.
+     * @param tablespaceName the Oracle tablespace for tables.
+     */
+    public void setTablespace(String tablespaceName) {
+        this.tablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Returns the configured Oracle tablespace for indexes.
+     * @return the configured Oracle tablespace for indexes.
+     */
+    public String getIndexTablespace() {
+        return indexTablespace;
+    }
+    
+    /**
+     * Sets the Oracle tablespace for indexes.
+     * @param tablespaceName the Oracle tablespace for indexes.
+     */
+    public void setIndexTablespace(String tablespaceName) {
+        this.indexTablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Constructs the <code>tablespace &lt;tbs name&gt;</code> clause from
+     * the supplied tablespace name. If the name is empty, {@link #DEFAULT_TABLESPACE_CLAUSE}
+     * is returned instead.
+     * 
+     * @param tablespaceName A tablespace name
+     * @return A tablespace clause using the supplied name or
+     * <code>{@value #DEFAULT_TABLESPACE_CLAUSE}</code> if the name is empty
+     */
+    private String buildTablespaceClause(String tablespaceName) {
+        if (tablespaceName == null || tablespaceName.trim().length() == 0) {
+            return DEFAULT_TABLESPACE_CLAUSE;
+        } else {
+            return "tablespace " + tablespaceName.trim();
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        OracleConnectionHelper helper = new OracleConnectionHelper(dataSrc, false);
+        helper.init();
+        return helper;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        if (DEFAULT_TABLESPACE_CLAUSE.equals(indexTablespace) && !DEFAULT_TABLESPACE_CLAUSE.equals(tablespace)) {
+            // tablespace was set but not indexTablespace : use the same for both
+            indexTablespace = tablespace;
+        }
+        return super.createCheckSchemaOperation()
+            .addVariableReplacement(TABLESPACE_VARIABLE, tablespace)
+            .addVariableReplacement(INDEX_TABLESPACE_VARIABLE, indexTablespace);
+    }
+}
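
This journal implementation is what a Jackrabbit cluster configuration would reference when the repository state lives in Oracle. A minimal sketch of how it might be wired, assuming Jackrabbit's usual <Cluster>/<Journal> elements; the node id and tablespace name below are placeholders, not values shipped with this commit:

<Cluster id="node1">
	<Journal class="com.bstek.urule.console.repository.database.journal.OracleDatabaseJournal">
		<param name="tablespace" value="URULE_DATA"/>
	</Journal>
</Cluster>

The constructor presets the Oracle driver and database type, so a configuration only needs to supply what differs from those defaults.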

+ 279 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/DerbyPersistenceManager.java

@@ -0,0 +1,279 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.persistence.PMContext;
+import org.apache.jackrabbit.core.persistence.pool.BundleDbPersistenceManager;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class DerbyPersistenceManager extends DbPersistenceManager {
+
+    /** name of the embedded driver */
+    public static final String DERBY_EMBEDDED_DRIVER = "org.apache.derby.jdbc.EmbeddedDriver";
+
+    /** @see #setDerbyStorageInitialPages(String) */
+    private int derbyStorageInitialPages = 16;
+
+    /** @see #setDerbyStorageMinimumRecordSize(String) */
+    private int derbyStorageMinimumRecordSize = 512;
+
+    /** @see #setDerbyStoragePageCacheSize(String) */
+    private int derbyStoragePageCacheSize = 1024;
+
+    /** @see #setDerbyStoragePageReservedSpace(String) */
+    private int derbyStoragePageReservedSpace = 20;
+
+    /** @see #setDerbyStoragePageSize(String) */
+    private int derbyStoragePageSize = 16384;
+
+    /**
+     * @see #setDerbyStorageInitialPages
+     * @return the initial pages property
+     */
+    public String getDerbyStorageInitialPages() {
+        return String.valueOf(derbyStorageInitialPages);
+    }
+
+    /**
+     * The on-disk size of a Derby table grows by one page at a time until eight
+     * pages of user data (or nine pages of total disk use, one is used for
+     * overhead) have been allocated. Then it will grow by eight pages at a time
+     * if possible.
+     * <p>
+     * A Derby table or index can be created with a number of pages already
+     * pre-allocated. To do so, specify the property prior to the CREATE TABLE
+     * or CREATE INDEX statement.
+     * <p>
+     * Define the number of user pages the table or index is to be created with.
+     * The purpose of this property is to preallocate a table or index of
+     * reasonable size if the user expects that a large amount of data will be
+     * inserted into the table or index. A table or index that has the
+     * pre-allocated pages will enjoy a small performance improvement over a
+     * table or index that has no pre-allocated pages when the data are loaded.
+     * <p>
+     * The total desired size of the table or index should be
+     * <p>
+     * <strong>(1+derby.storage.initialPages) * derby.storage.pageSize bytes.</strong>
+     * <p>
+     * When you create a table or an index after setting this property, Derby
+     * attempts to preallocate the requested number of user pages. However, the
+     * operations do not fail even if they are unable to preallocate the
+     * requested number of pages, as long as they allocate at least one page.
+     * <p>
+     * Default is <code>16</code>
+     *
+     * @param derbyStorageInitialPages the number of initial pages
+     */
+    public void setDerbyStorageInitialPages(String derbyStorageInitialPages) {
+        this.derbyStorageInitialPages =
+                Integer.decode(derbyStorageInitialPages).intValue();
+    }
+
+    /**
+     * @see #setDerbyStorageMinimumRecordSize
+     * @return the minimum record size
+     */
+    public String getDerbyStorageMinimumRecordSize() {
+        return String.valueOf(derbyStorageMinimumRecordSize);
+    }
+
+    /**
+     * Indicates the minimum user row size in bytes for on-disk database pages
+     * for tables when you are creating a table. This property ensures that
+     * there is enough room for a row to grow on a page when updated without
+     * having to overflow. This is generally most useful for VARCHAR and
+     * VARCHAR FOR BIT DATA data types and for tables that are updated a lot,
+     * in which the rows start small and grow due to updates. Reserving the
+     * space at the time of insertion minimizes row overflow due to updates,
+     * but it can result in wasted space. Set the property prior to issuing the
+     * CREATE TABLE statement.
+     * <p>
+     * Default is <code>512</code>
+     *
+     * @param derbyStorageMinimumRecordSize the minimum record size
+     */
+    public void setDerbyStorageMinimumRecordSize(String derbyStorageMinimumRecordSize) {
+        this.derbyStorageMinimumRecordSize =
+                Integer.decode(derbyStorageMinimumRecordSize).intValue();
+    }
+
+    /**
+     * @see #setDerbyStoragePageCacheSize
+     * @return the page cache size
+     */
+    public String getDerbyStoragePageCacheSize() {
+        return String.valueOf(derbyStoragePageCacheSize);
+    }
+
+    /**
+     * Defines the size, in number of pages, of the database's data page cache
+     * (data pages kept in memory). The actual amount of memory the page cache
+     * will use depends on the following:
+     * <ul>
+     * <li> the size of the cache (configured with {@link #setDerbyStoragePageCacheSize})
+     * <li> the size of the pages (configured with {@link #setDerbyStoragePageSize})
+     * <li> overhead (varies with JVMs)
+     * </ul>
+     * When increasing the size of the page cache, you typically have to allow
+     * more memory for the Java heap when starting the embedding application
+     * (taking into consideration, of course, the memory needs of the embedding
+     * application as well). For example, using the default page size of 4K, a
+     * page cache size of 2000 pages will require at least 8 MB of memory (and
+     * probably more, given the overhead).
+     * <p>
+     * The minimum value is 40 pages. If you specify a lower value, Derby uses
+     * the default value.
+     * <p>
+     * Default is <code>1024</code> (which gives about 16mb memory usage given
+     * the default of 16384 as page size).
+     *
+     * @param derbyStoragePageCacheSize the page cache size
+     */
+    public void setDerbyStoragePageCacheSize(String derbyStoragePageCacheSize) {
+        this.derbyStoragePageCacheSize =
+                Integer.decode(derbyStoragePageCacheSize).intValue();
+    }
+
+
+    /**
+     * @see #setDerbyStoragePageReservedSpace
+     * @return the page reserved space
+     */
+    public String getDerbyStoragePageReservedSpace() {
+        return String.valueOf(derbyStoragePageReservedSpace);
+    }
+
+    /**
+     * Defines the percentage of space reserved for updates on an on-disk
+     * database page for tables only (not indexes); indicates the percentage of
+     * space to keep free on a page when inserting. Leaving reserved space on a
+     * page can minimize row overflow (and the associated performance hit)
+     * during updates. Once a page has been filled up to the reserved-space
+     * threshold, no new rows are allowed on the page. This reserved space is
+     * used only for rows that increase in size when updated, not for new
+     * inserts. Set this property prior to issuing the CREATE TABLE statement.
+     * <p>
+     * Regardless of the value of derby.storage.pageReservedSpace, an empty page
+     * always accepts at least one row.
+     * <p>
+     * Default is <code>20%</code>
+     *
+     * @param derbyStoragePageReservedSpace the page reserved space
+     */
+    public void setDerbyStoragePageReservedSpace(String derbyStoragePageReservedSpace) {
+        this.derbyStoragePageReservedSpace =
+                Integer.decode(derbyStoragePageReservedSpace).intValue();
+    }
+
+    /**
+     * @see #setDerbyStoragePageSize
+     * @return the page size
+     */
+    public String getDerbyStoragePageSize() {
+        return String.valueOf(derbyStoragePageSize);
+    }
+
+    /**
+     * Defines the page size, in bytes, for on-disk database pages for tables or
+     * indexes used during table or index creation. Page size can only be one
+     * of the following values: 4096, 8192, 16384, or 32768. Set this property
+     * prior to issuing the CREATE TABLE or CREATE INDEX statement. This value
+     * will be used for the lifetime of the newly created conglomerates.
+     * <p>
+     * Default is <code>16384</code>
+     *
+     * @param derbyStoragePageSize the storage page size
+     */
+    public void setDerbyStoragePageSize(String derbyStoragePageSize) {
+        this.derbyStoragePageSize = Integer.decode(derbyStoragePageSize).intValue();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(PMContext context) throws Exception {
+        // init default values
+        if (getDriver() == null) {
+            setDriver(DERBY_EMBEDDED_DRIVER);
+        }
+        if (getDatabaseType() == null) {
+            setDatabaseType("derby");
+        }
+        if (getUrl() == null) {
+            setUrl("jdbc:derby:" + context.getHomeDir().getPath() + "/db/itemState;create=true");
+        }
+        if (getSchemaObjectPrefix() == null) {
+            setSchemaObjectPrefix("");
+        }
+        super.init(context);
+        // set properties       
+        if (DERBY_EMBEDDED_DRIVER.equals(getDriver())) {
+            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                    + "('derby.storage.initialPages', '" + derbyStorageInitialPages + "')");
+            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                    + "('derby.storage.minimumRecordSize', '" + derbyStorageMinimumRecordSize + "')");
+            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                    + "('derby.storage.pageCacheSize', '" + derbyStoragePageCacheSize + "')");
+            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                    + "('derby.storage.pageReservedSpace', '" + derbyStoragePageReservedSpace + "')");
+            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY " + "('derby.storage.pageSize', '"
+                    + derbyStoragePageSize + "')");
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) {
+        return new DerbyConnectionHelper(dataSrc, blockOnConnectionLoss);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * Since Derby cannot handle binary indexes, we use long-long keys.
+     *
+     * @return {@link BundleDbPersistenceManager#SM_LONGLONG_KEYS}
+     */
+    public int getStorageModel() {
+        return BundleDbPersistenceManager.SM_LONGLONG_KEYS;
+    }
+
+    /**
+     * Closes this persistence manager and shuts down the embedded Derby
+     * database.
+     *
+     * @throws Exception if an error occurs
+     */
+    public void close() throws Exception {
+        super.close();
+        ((DerbyConnectionHelper) conHelper).shutDown(getDriver());
+    }
+}
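
As a quick check on the defaults above: the pre-allocation formula (1 + derby.storage.initialPages) * derby.storage.pageSize works out to (1 + 16) * 16384 = 278,528 bytes, roughly 272 KB reserved per newly created table or index, and the 1024-page cache at 16384 bytes per page is where the roughly 16 MB figure in the page-cache notes comes from.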

+ 69 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/H2PersistenceManager.java

@@ -0,0 +1,69 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import org.apache.jackrabbit.core.persistence.PMContext;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class H2PersistenceManager extends DbPersistenceManager {
+
+    /** The lock timeout in milliseconds. See {@link #setLockTimeout(String)}. */
+    private long lockTimeout = 10000;
+
+    /**
+     * Returns the lock timeout.
+     * @return the lock timeout
+     */
+    public String getLockTimeout() {
+        return String.valueOf(lockTimeout);
+    }
+
+    /**
+     * Sets the lock timeout in milliseconds.
+     * @param lockTimeout the lock timeout.
+     */
+    public void setLockTimeout(String lockTimeout) {
+        this.lockTimeout = Long.parseLong(lockTimeout);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(PMContext context) throws Exception {
+        // init default values
+        if (getDriver() == null) {
+            setDriver("org.h2.Driver");
+        }
+        if (getUrl() == null) {
+            setUrl("jdbc:h2:file:" + context.getHomeDir().getPath() + "/db/itemState");
+        }
+        if (getDatabaseType() == null) {
+            setDatabaseType("h2");
+        }
+        if (getSchemaObjectPrefix() == null) {
+            setSchemaObjectPrefix("");
+        }
+
+        super.init(context);
+        
+        conHelper.exec("SET LOCK_TIMEOUT " + lockTimeout);
+    }
+}

+ 67 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/MSSqlPersistenceManager.java

@@ -0,0 +1,67 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class MSSqlPersistenceManager extends DbPersistenceManager {
+
+    /** the MS SQL table space to use */
+    protected String tableSpace = "";
+
+    public MSSqlPersistenceManager() {
+        setDriver("com.microsoft.sqlserver.jdbc.SQLServerDriver");
+        setDatabaseType("mssql");
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        return super.createCheckSchemaOperation().addVariableReplacement(
+            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    }
+
+    /**
+     * Returns the configured MS SQL table space.
+     * 
+     * @return the configured MS SQL table space.
+     */
+    public String getTableSpace() {
+        return tableSpace;
+    }
+
+    /**
+     * Sets the MS SQL table space.
+     * 
+     * @param tableSpace the MS SQL table space.
+     */
+    public void setTableSpace(String tableSpace) {
+        if (tableSpace != null && tableSpace.trim().length() > 0) {
+            this.tableSpace = "on " + tableSpace.trim();
+        } else {
+            this.tableSpace = "";
+        }
+    }
+
+}

+ 42 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/MySqlPersistenceManager.java

@@ -0,0 +1,42 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import org.apache.jackrabbit.core.persistence.PMContext;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class MySqlPersistenceManager extends DbPersistenceManager {
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(PMContext context) throws Exception {
+        // init default values
+        if (getDriver() == null) {
+            setDriver("org.gjt.mm.mysql.Driver");
+        }
+        if (getDatabaseType() == null) {
+            setDatabaseType("mysql");
+        }
+        super.init(context);
+    }
+
+}

+ 37 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/Oracle9PersistenceManager.java

@@ -0,0 +1,37 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class Oracle9PersistenceManager extends OraclePersistenceManager {
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        Oracle10R1ConnectionHelper helper = new Oracle10R1ConnectionHelper(dataSrc, blockOnConnectionLoss);
+        helper.init();
+        return helper;
+    }
+}

+ 167 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/OraclePersistenceManager.java

@@ -0,0 +1,167 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.persistence.PMContext;
+import org.apache.jackrabbit.core.persistence.pool.DbNameIndex;
+import org.apache.jackrabbit.core.persistence.pool.NGKDbNameIndex;
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class OraclePersistenceManager extends DbPersistenceManager {
+    /**
+     * The default tablespace clause used when {@link #tablespace} or {@link #indexTablespace}
+     * are not specified.
+     */
+    protected static final String DEFAULT_TABLESPACE_CLAUSE = "";
+
+    /**
+     * Name of the replacement variable in the DDL for {@link #tablespace}.
+     */
+    protected static final String TABLESPACE_VARIABLE = "${tablespace}";
+
+    /**
+     * Name of the replacement variable in the DDL for {@link #indexTablespace}.
+     */
+    protected static final String INDEX_TABLESPACE_VARIABLE = "${indexTablespace}";
+
+    /** The Oracle tablespace to use for tables */
+    protected String tablespace;
+
+    /** The Oracle tablespace to use for indexes */
+    protected String indexTablespace;
+
+    /**
+     * Creates a new oracle persistence manager
+     */
+    public OraclePersistenceManager() {
+        tablespace = DEFAULT_TABLESPACE_CLAUSE;
+        indexTablespace = DEFAULT_TABLESPACE_CLAUSE;
+        // enable db blob support
+        setExternalBLOBs(false);
+    }
+
+    /**
+     * Returns the configured Oracle tablespace for tables.
+     * @return the configured Oracle tablespace for tables.
+     */
+    public String getTablespace() {
+        return tablespace;
+    }
+
+    /**
+     * Sets the Oracle tablespace for tables.
+     * @param tablespaceName the Oracle tablespace for tables.
+     */
+    public void setTablespace(String tablespaceName) {
+        this.tablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Returns the configured Oracle tablespace for indexes.
+     * @return the configured Oracle tablespace for indexes.
+     */
+    public String getIndexTablespace() {
+        return indexTablespace;
+    }
+    
+    /**
+     * Sets the Oracle tablespace for indexes.
+     * @param tablespaceName the Oracle tablespace for indexes.
+     */
+    public void setIndexTablespace(String tablespaceName) {
+        this.indexTablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Constructs the <code>tablespace &lt;tbs name&gt;</code> clause from
+     * the supplied tablespace name. If the name is empty, {@link #DEFAULT_TABLESPACE_CLAUSE}
+     * is returned instead.
+     * 
+     * @param tablespaceName A tablespace name
+     * @return A tablespace clause using the supplied name or
+     * <code>{@value #DEFAULT_TABLESPACE_CLAUSE}</code> if the name is empty
+     */
+    private String buildTablespaceClause(String tablespaceName) {
+        if (tablespaceName == null || tablespaceName.trim().length() == 0) {
+            return DEFAULT_TABLESPACE_CLAUSE;
+        } else {
+            return "tablespace " + tablespaceName.trim();
+        }
+    }
+
+    public void init(PMContext context) throws Exception {
+        // init default values
+        if (getDriver() == null) {
+            setDriver("oracle.jdbc.OracleDriver");
+        }
+        if (getUrl() == null) {
+            setUrl("jdbc:oracle:thin:@127.0.0.1:1521:xe");
+        }
+        if (getDatabaseType() == null) {
+            setDatabaseType("oracle");
+        }
+        if (getSchemaObjectPrefix() == null) {
+            setSchemaObjectPrefix(context.getHomeDir().getName() + "_");
+        }
+        super.init(context);
+    }
+
+    /**
+     * Returns a new instance of a NGKDbNameIndex.
+     * 
+     * @return a new instance of a NGKDbNameIndex.
+     * @throws SQLException if an SQL error occurs.
+     */
+    protected DbNameIndex createDbNameIndex() throws SQLException {
+        return new NGKDbNameIndex(conHelper, schemaObjectPrefix);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        OracleConnectionHelper helper = new OracleConnectionHelper(dataSrc, blockOnConnectionLoss);
+        helper.init();
+        return helper;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        if (DEFAULT_TABLESPACE_CLAUSE.equals(indexTablespace) && !DEFAULT_TABLESPACE_CLAUSE.equals(tablespace)) {
+            // tablespace was set but not indexTablespace : use the same for both
+            indexTablespace = tablespace;
+        }
+        return super.createCheckSchemaOperation()
+            .addVariableReplacement(TABLESPACE_VARIABLE, tablespace)
+            .addVariableReplacement(INDEX_TABLESPACE_VARIABLE, indexTablespace);
+    }
+}
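
Because the tablespace setters turn a plain name into a full "tablespace <name>" clause and createCheckSchemaOperation() substitutes it for ${tablespace} in the DDL (reusing the table tablespace for indexes when only one is given), a Jackrabbit repository.xml-style configuration only has to pass the raw names. A minimal sketch, assuming the usual <param>-to-setter mapping; the prefix and tablespace names are placeholders:

<PersistenceManager class="com.bstek.urule.console.repository.database.manager.OraclePersistenceManager">
	<param name="schemaObjectPrefix" value="URULE_"/>
	<param name="tablespace" value="URULE_DATA"/>
	<param name="indexTablespace" value="URULE_IDX"/>
</PersistenceManager>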

+ 75 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/manager/PostgreSQLPersistenceManager.java

@@ -0,0 +1,75 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.manager;
+
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.persistence.PMContext;
+import org.apache.jackrabbit.core.persistence.pool.DbNameIndex;
+import org.apache.jackrabbit.core.persistence.pool.PostgreSQLNameIndex;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.PostgreSQLConnectionHelper;
+
+import com.bstek.urule.console.repository.database.DbPersistenceManager;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class PostgreSQLPersistenceManager extends DbPersistenceManager {
+
+    /**
+     * {@inheritDoc}
+     */
+    public void init(PMContext context) throws Exception {
+        // init default values
+        if (getDriver() == null) {
+            setDriver("org.postgresql.Driver");
+        }
+        if (getDatabaseType() == null) {
+            setDatabaseType("postgresql");
+        }
+        super.init(context);
+    }
+
+    /**
+     * Returns a new instance of a DbNameIndex.
+     * @return a new instance of a DbNameIndex.
+     * @throws java.sql.SQLException if an SQL error occurs.
+     */
+    protected DbNameIndex createDbNameIndex() throws SQLException {
+        return new PostgreSQLNameIndex(conHelper, schemaObjectPrefix);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+    	return new PostgreSQLConnectionHelper(dataSrc, blockOnConnectionLoss);
+    }
+
+    /**
+     * returns the storage model
+     * @return the storage model
+     */
+    public int getStorageModel() {
+        return SM_LONGLONG_KEYS;
+    }
+
+}

+ 54 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/store/DerbyDataStore.java

@@ -0,0 +1,54 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.store;
+
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
+
+import com.bstek.urule.console.repository.database.DatabaseDataStore;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-07
+ */
+public class DerbyDataStore extends DatabaseDataStore {
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        return new DerbyConnectionHelper(dataSrc, false);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public synchronized void close() throws DataStoreException {
+        super.close();
+        try {
+            ((DerbyConnectionHelper) conHelper).shutDown(getDriver());
+        } catch (SQLException e) {
+            throw new DataStoreException(e);
+        }
+    }
+}

+ 69 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/DB2FileSystem.java

@@ -0,0 +1,69 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class DB2FileSystem extends BaseDbFileSystem {
+
+	@Override
+	public String databaseType() {
+		return "db2";
+	}
+
+    /**
+     * Creates a new <code>DB2FileSystem</code> instance.
+     */
+    public DB2FileSystem() {
+        // preset some attributes to reasonable defaults
+        schema = "db2";
+        driver = "com.ibm.db2.jcc.DB2Driver";
+    }
+
+    //-----------------------------------------< DatabaseFileSystem overrides >
+    /**
+     * {@inheritDoc}
+     * <p>
+     * Since DB2 requires parameter markers within the select clause to be
+     * explicitly typed using <code>cast(? as type_name)</code> some statements
+     * had to be changed accordingly.
+     */
+    protected void buildSQLStatements() {
+        super.buildSQLStatements();
+
+        copyFileSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "select cast(? as varchar(745)), cast(? as varchar(255)), FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_DATA is not null";
+
+        copyFilesSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "select cast(? as varchar(745)), FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_DATA is not null";
+    }
+}

+ 91 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/DerbyFileSystem.java

@@ -0,0 +1,91 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import java.sql.SQLException;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.fs.FileSystemException;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class DerbyFileSystem extends BaseDbFileSystem {
+
+	@Override
+	public String databaseType() {
+		return "derby";
+	}
+
+    /**
+     * Flag indicating whether this derby database should be shutdown on close.
+     */
+    protected boolean shutdownOnClose;
+
+    /**
+     * Creates a new <code>DerbyFileSystem</code> instance.
+     */
+    public DerbyFileSystem() {
+        // preset some attributes to reasonable defaults
+        schema = "derby";
+        driver = "org.apache.derby.jdbc.EmbeddedDriver";
+        shutdownOnClose = true;
+        initialized = false;
+    }
+
+    //----------------------------------------------------< setters & getters >
+
+    public boolean getShutdownOnClose() {
+        return shutdownOnClose;
+    }
+
+    public void setShutdownOnClose(boolean shutdownOnClose) {
+        this.shutdownOnClose = shutdownOnClose;
+    }
+
+    //-----------------------------------------------< DbFileSystem overrides >
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        return new DerbyConnectionHelper(dataSrc, false);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void close() throws FileSystemException {
+        super.close();
+        if (shutdownOnClose) {
+            try {
+                ((DerbyConnectionHelper) conHelper).shutDown(driver);
+            } catch (SQLException e) {
+                throw new FileSystemException("failed to shutdown Derby", e);
+            }
+        }
+    }
+}

+ 78 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/MSSqlFileSystem.java

@@ -0,0 +1,78 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class MSSqlFileSystem extends BaseDbFileSystem {
+	@Override
+	public String databaseType() {
+		return "mssql";
+	}
+
+    /** the variable for the MS SQL table space */
+    public static final String TABLE_SPACE_VARIABLE = "${tableSpace}";
+
+    /** the MS SQL table space to use */
+    protected String tableSpace = "";
+
+    /**
+     * Returns the configured MS SQL table space.
+     * @return the configured MS SQL table space.
+     */
+    public String getTableSpace() {
+        return tableSpace;
+    }
+
+    /**
+     * Sets the MS SQL table space.
+     * @param tableSpace the MS SQL table space.
+     */
+    public void setTableSpace(String tableSpace) {
+        if (tableSpace != null && tableSpace.length() > 0) {
+            this.tableSpace = "on " + tableSpace.trim();
+        } else {
+            this.tableSpace = "";
+        }
+    }
+
+    /**
+     * Creates a new <code>MSSqlFileSystem</code> instance.
+     */
+    public MSSqlFileSystem() {
+        // preset some attributes to reasonable defaults
+        schema = "mssql";
+        driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        return super.createCheckSchemaOperation().addVariableReplacement(
+            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    }
+
+}

+ 29 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/MysqlFileSystem.java

@@ -0,0 +1,29 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class MysqlFileSystem extends BaseDbFileSystem {
+	@Override
+	public String databaseType() {
+		return "mysql";
+	}
+}

+ 37 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/Oracle9FileSystem.java

@@ -0,0 +1,37 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class Oracle9FileSystem extends OracleFileSystem {
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        Oracle10R1ConnectionHelper helper = new Oracle10R1ConnectionHelper(dataSrc, false);
+        helper.init();
+        return helper;
+    }
+}

+ 256 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/OracleFileSystem.java

@@ -0,0 +1,256 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import javax.sql.DataSource;
+
+import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.core.util.db.ConnectionHelper;
+import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class OracleFileSystem extends BaseDbFileSystem {
+    @Override
+    public String databaseType() {
+        return "oracle";
+    }
+
+    /**
+     * The default tablespace clause used when {@link #tablespace} or {@link #indexTablespace}
+     * are not specified.
+     */
+    protected static final String DEFAULT_TABLESPACE_CLAUSE = "";
+    
+    /**
+     * Name of the replacement variable in the DDL for {@link #tablespace}.
+     */
+    protected static final String TABLESPACE_VARIABLE = "${tablespace}";
+    
+    /**
+     * Name of the replacement variable in the DDL for {@link #indexTablespace}.
+     */
+    protected static final String INDEX_TABLESPACE_VARIABLE = "${indexTablespace}";
+
+    /** The Oracle tablespace to use for tables */
+    protected String tablespace;
+
+    /** The Oracle tablespace to use for indexes */
+    protected String indexTablespace;
+    
+    /**
+     * Creates a new <code>OracleFileSystem</code> instance.
+     */
+    public OracleFileSystem() {
+        // preset some attributes to reasonable defaults
+        schema = "oracle";
+        driver = "oracle.jdbc.OracleDriver";
+        schemaObjectPrefix = "";
+        tablespace = DEFAULT_TABLESPACE_CLAUSE;
+        indexTablespace = DEFAULT_TABLESPACE_CLAUSE;
+        initialized = false;
+    }
+
+    /**
+     * Returns the configured Oracle tablespace for tables.
+     * @return the configured Oracle tablespace for tables.
+     */
+    public String getTablespace() {
+        return tablespace;
+    }
+
+    /**
+     * Sets the Oracle tablespace for tables.
+     * @param tablespaceName the Oracle tablespace for tables.
+     */
+    public void setTablespace(String tablespaceName) {
+        this.tablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Returns the configured Oracle tablespace for indexes.
+     * @return the configured Oracle tablespace for indexes.
+     */
+    public String getIndexTablespace() {
+        return indexTablespace;
+    }
+    
+    /**
+     * Sets the Oracle tablespace for indexes.
+     * @param tablespaceName the Oracle tablespace for indexes.
+     */
+    public void setIndexTablespace(String tablespaceName) {
+        this.indexTablespace = this.buildTablespaceClause(tablespaceName);
+    }
+    
+    /**
+     * Constructs the <code>tablespace &lt;tbs name&gt;</code> clause from
+     * the supplied tablespace name. If the name is empty, {@link #DEFAULT_TABLESPACE_CLAUSE}
+     * is returned instead.
+     * 
+     * @param tablespaceName A tablespace name
+     * @return A tablespace clause using the supplied name or
+     * <code>{@value #DEFAULT_TABLESPACE_CLAUSE}</code> if the name is empty
+     */
+    private String buildTablespaceClause(String tablespaceName) {
+        if (tablespaceName == null || tablespaceName.trim().length() == 0) {
+            return DEFAULT_TABLESPACE_CLAUSE;
+        } else {
+            return "tablespace " + tablespaceName.trim();
+        }
+    }
+
+    //-----------------------------------------< DatabaseFileSystem overrides >
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
+        OracleConnectionHelper helper = new OracleConnectionHelper(dataSrc, false);
+        helper.init();
+        return helper;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    protected CheckSchemaOperation createCheckSchemaOperation() {
+        if (DEFAULT_TABLESPACE_CLAUSE.equals(indexTablespace) && !DEFAULT_TABLESPACE_CLAUSE.equals(tablespace)) {
+            // tablespace was set but not indexTablespace : use the same for both
+            indexTablespace = tablespace;
+        }
+        return super.createCheckSchemaOperation()
+            .addVariableReplacement(TABLESPACE_VARIABLE, tablespace)
+            .addVariableReplacement(INDEX_TABLESPACE_VARIABLE, indexTablespace);
+    }
+
+    //-----------------------------------------< DatabaseFileSystem overrides >
+    
+    /**
+     * Builds the SQL statements
+     * <p>
+     * Since Oracle treats empty strings and BLOBs as null values, the SQL
+     * statements had to be adapted accordingly. The following changes were
+     * necessary:
+     * <ul>
+     * <li>The distinction between file and folder entries is based on
+     * FSENTRY_LENGTH being null/not null rather than FSENTRY_DATA being
+     * null/not null because FSENTRY_DATA of a 0-length (i.e. empty) file is
+     * null in Oracle.</li>
+     * <li>Folder entries: Since the root folder has an empty name (which would
+     * be null in Oracle), an empty name is automatically converted and treated
+     * as " ".</li>
+     * </ul>
+     */
+    protected void buildSQLStatements() {
+        insertFileSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "values (?, ?, ?, ?, ?)";
+
+        insertFolderSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "values (?, nvl(?, ' '), ?, null)";
+
+        updateDataSQL = "update "
+                + schemaObjectPrefix + "FSENTRY "
+                + "set FSENTRY_DATA = ?, FSENTRY_LASTMOD = ?, FSENTRY_LENGTH = ? "
+                + "where FSENTRY_PATH = ? and FSENTRY_NAME = ? "
+                + "and FSENTRY_LENGTH is not null";
+
+        updateLastModifiedSQL = "update "
+                + schemaObjectPrefix + "FSENTRY set FSENTRY_LASTMOD = ? "
+                + "where FSENTRY_PATH = ? and FSENTRY_NAME = ? "
+                + "and FSENTRY_LENGTH is not null";
+
+        selectExistSQL = "select 1 from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = nvl(?, ' ')";
+
+        selectFileExistSQL = "select 1 from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_LENGTH is not null";
+
+        selectFolderExistSQL = "select 1 from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = nvl(?, ' ') and FSENTRY_LENGTH is null";
+
+        selectFileNamesSQL = "select FSENTRY_NAME from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_LENGTH is not null";
+
+        selectFolderNamesSQL = "select FSENTRY_NAME from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME != ' ' "
+                + "and FSENTRY_LENGTH is null";
+
+        selectFileAndFolderNamesSQL = "select FSENTRY_NAME from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME != ' '";
+
+        selectChildCountSQL = "select count(FSENTRY_NAME) from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ?  "
+                + "and FSENTRY_NAME != ' '";
+
+        selectDataSQL = "select nvl(FSENTRY_DATA, empty_blob()) from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_LENGTH is not null";
+
+        selectLastModifiedSQL = "select FSENTRY_LASTMOD from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = nvl(?, ' ')";
+
+        selectLengthSQL = "select nvl(FSENTRY_LENGTH, 0) from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_LENGTH is not null";
+
+        deleteFileSQL = "delete from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_LENGTH is not null";
+
+        deleteFolderSQL = "delete from "
+                + schemaObjectPrefix + "FSENTRY where "
+                + "(FSENTRY_PATH = ? and FSENTRY_NAME = nvl(?, ' ') and FSENTRY_LENGTH is null) "
+                + "or (FSENTRY_PATH = ?) "
+                + "or (FSENTRY_PATH like ?) ";
+
+        copyFileSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "select ?, ?, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_NAME = ? and FSENTRY_LENGTH is not null";
+
+        copyFilesSQL = "insert into "
+                + schemaObjectPrefix + "FSENTRY "
+                + "(FSENTRY_PATH, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH) "
+                + "select ?, FSENTRY_NAME, FSENTRY_DATA, "
+                + "FSENTRY_LASTMOD, FSENTRY_LENGTH from "
+                + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
+                + "and FSENTRY_LENGTH is not null";
+    }
+}

+ 29 - 0
urule-console/src/main/java/com/bstek/urule/console/repository/database/system/PostgreSQLFileSystem.java

@@ -0,0 +1,29 @@
+/*******************************************************************************
+ * Copyright 2017 Bstek
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.  You may obtain a copy
+ * of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ ******************************************************************************/
+package com.bstek.urule.console.repository.database.system;
+
+import com.bstek.urule.console.repository.database.BaseDbFileSystem;
+
+/**
+ * @author Jacky.gao
+ * @since 2017-12-06
+ */
+public class PostgreSQLFileSystem extends BaseDbFileSystem {
+	@Override
+	public String databaseType() {
+		return "postgresql";
+	}
+}

+ 2 - 0
urule-console/src/main/resources/urule-console-context.properties

@@ -2,3 +2,5 @@ urule.repository.xml=
 urule.decisionTree.style=new
 urule.welcomePage=
 urule.console.title=URule Console
+urule.repository.datasourcename=
+urule.repository.databasetype=

+ 2 - 0
urule-console/src/main/resources/urule-console-context.xml

@@ -101,6 +101,8 @@
 	<bean id="urule.repositoryBuilder" class="com.bstek.urule.console.repository.RepositoryBuilder">
 		<property name="repositoryXml" value="${urule.repository.xml}"></property>
 		<property name="repoHomeDir" value="${urule.repository.dir}"></property>
+		<property name="repositoryDatasourceName" value="${urule.repository.datasourcename}"></property>
+		<property name="databaseType" value="${urule.repository.databasetype}"></property>
 	</bean>
 	
 	<bean id="urule.repositoryService" class="com.bstek.urule.console.repository.RepositoryServiceImpl">
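
These two RepositoryBuilder properties are the glue for the new feature: ${urule.repository.datasourcename} names a DataSource bean that already exists in the Spring context, and ${urule.repository.databasetype} identifies the target database, presumably used to pick the matching bundled database configuration (mysql, oracle, and so on). A minimal sketch of the application side, assuming a bean called uruleDataSource backed by MySQL; the bean id, pool class, driver, URL and credentials are all illustrative:

<bean id="uruleDataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">
	<property name="driverClassName" value="com.mysql.jdbc.Driver"></property>
	<property name="url" value="jdbc:mysql://localhost:3306/urule_repo"></property>
	<property name="username" value="urule"></property>
	<property name="password" value="urule"></property>
</bean>

With such a bean in place, setting urule.repository.datasourcename=uruleDataSource and urule.repository.databasetype=mysql in urule-console-context.properties would route the repository's persistence through the Spring-managed connection pool instead of a repository-opened JDBC connection.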