Index: jackrabbit-core/src/test/repository/workspaces/workspace-init-test/workspace.xml
===================================================================
--- jackrabbit-core/src/test/repository/workspaces/workspace-init-test/workspace.xml	(.../sandbox/JCR-1456)	(revision 0)
+++ jackrabbit-core/src/test/repository/workspaces/workspace-init-test/workspace.xml	(.../trunk)	(revision 827970)
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
+<Workspace name="workspace-init-test">
+  <!--
+      virtual file system of the workspace:
+      class: FQN of class implementing FileSystem interface
+  -->
+  <FileSystem class="org.apache.jackrabbit.core.fs.local.LocalFileSystem">
+    <param name="path" value="${wsp.home}" />
+  </FileSystem>
+
+  <!--
+      persistence of the workspace:
+      class: FQN of class implementing PersistenceManager interface
+  -->
+  <PersistenceManager class="org.apache.jackrabbit.core.persistence.mem.InMemPersistenceManager"/>
+  
+  <!--
+      Search index and the file system it uses.
+  -->
+  <SearchIndex class="org.apache.jackrabbit.core.query.lucene.SlowQueryHandler">
+    <param name="path" value="${wsp.home}/index" />
+    <param name="directoryManagerClass" value="org.apache.jackrabbit.core.query.lucene.directory.RAMDirectoryManager"/>
+  </SearchIndex>
+</Workspace>
+

Property changes on: jackrabbit-core/src/test/repository/workspaces/workspace-init-test/workspace.xml
___________________________________________________________________
Added: svn:eol-style
   + native

Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystemTest.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystemTest.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystemTest.java	(.../trunk)	(revision 827970)
@@ -17,24 +17,20 @@
 package org.apache.jackrabbit.core.fs.db;
 
 import java.io.File;
+import java.io.IOException;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.core.fs.AbstractFileSystemTest;
 import org.apache.jackrabbit.core.fs.FileSystem;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
 
 /**
  * Tests the Apache Derby file system.
  */
 public class DerbyFileSystemTest extends AbstractFileSystemTest {
 
-    private ConnectionFactory conFac;
-
     private File file;
 
     protected FileSystem getFileSystem() {
         DerbyFileSystem filesystem = new DerbyFileSystem();
-        filesystem.setConnectionFactory(conFac);
         filesystem.setUrl("jdbc:derby:" + file.getPath() + ";create=true");
         return filesystem;
     }
@@ -42,13 +38,20 @@
     protected void setUp() throws Exception {
         file = File.createTempFile("jackrabbit", "derbyfs");
         file.delete();
-        conFac = new ConnectionFactory();
         super.setUp();
     }
 
     protected void tearDown() throws Exception {
         super.tearDown();
-        FileUtils.deleteDirectory(file);
-        conFac.close();
+        delete(file);
     }
+
+    private void delete(File file) throws IOException {
+        File[] files = file.listFiles();
+        for (int i = 0; files != null && i < files.length; i++) {
+            delete(files[i]);
+        }
+        file.delete();
+    }
+
 }
Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/integration/WorkspaceInitTest.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/integration/WorkspaceInitTest.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/integration/WorkspaceInitTest.java	(.../trunk)	(revision 827970)
@@ -48,7 +48,7 @@
             Thread t = new Thread(new Runnable() {
                 public void run() {
                     try {
-                        getHelper().getSuperuserSession("wsp-init-test").logout();
+                        getHelper().getSuperuserSession("workspace-init-test").logout();
                     } catch (RepositoryException e) {
                         throw new RuntimeException(e);
                     }
Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/DataSourceConfigTest.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/DataSourceConfigTest.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/DataSourceConfigTest.java	(.../trunk)	(revision 827970)
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.jackrabbit.core.config;
-
-import java.util.Properties;
-
-import org.apache.jackrabbit.core.config.DataSourceConfig.DataSourceDefinition;
-
-import junit.framework.TestCase;
-
-public class DataSourceConfigTest extends TestCase {
-
-    private DataSourceConfig cfg;
-
-    private Properties minimalProps;
-
-    private Properties minimalProps2;
-
-    @Override
-    public void setUp() {
-        cfg = new DataSourceConfig();
-        minimalProps = new Properties();
-        minimalProps.put(DataSourceConfig.LOGICAL_NAME, "ds");
-        minimalProps.put(DataSourceConfig.DRIVER, "org.apache.derby.jdbc.EmbeddedDriver");
-        minimalProps.put(DataSourceConfig.URL, "url");
-        minimalProps.put(DataSourceConfig.DB_TYPE, "dbType");
-        minimalProps2 = new Properties();
-        minimalProps2.put(DataSourceConfig.LOGICAL_NAME, "ds2");
-        minimalProps2.put(DataSourceConfig.DRIVER, "org.apache.derby.jdbc.EmbeddedDriver");
-        minimalProps2.put(DataSourceConfig.URL, "url2");
-        minimalProps2.put(DataSourceConfig.DB_TYPE, "dbType2");
-    }
-
-    public void testEmptyConfig() {
-        assertEquals(0, cfg.getDefinitions().size());
-    }
-
-    public void testMinimalRegularConfig() throws ConfigurationException {
-        cfg.addDataSourceDefinition(minimalProps);
-        DataSourceDefinition def = cfg.getDefinitions().get(0);
-        assertEquals("ds", def.getLogicalName());
-        assertEquals("org.apache.derby.jdbc.EmbeddedDriver", def.getDriver());
-        assertEquals("url", def.getUrl());
-        assertEquals("dbType", def.getDbType());
-        // check default values:
-        assertNull(def.getUser());
-        assertNull(def.getPassword());
-        assertNull(def.getValidationQuery());
-        assertEquals(-1, def.getMaxPoolSize()); // unlimited
-    }
-
-    public void testMultipleDefs() throws ConfigurationException {
-        cfg.addDataSourceDefinition(minimalProps);
-        cfg.addDataSourceDefinition(minimalProps2);
-        assertEquals(2, cfg.getDefinitions().size());
-    }
-
-    public void testTooMinimalConfig() {
-        try {
-            minimalProps.remove(DataSourceConfig.URL);
-            cfg.addDataSourceDefinition(minimalProps);
-            fail();
-        } catch (ConfigurationException e) {
-            // expected
-        }
-    }
-
-    public void testInvalidProperty() {
-        try {
-            minimalProps.put("unknown property", "value");
-            cfg.addDataSourceDefinition(minimalProps);
-            fail();
-        } catch (ConfigurationException e) {
-            // expected
-        }
-    }
-
-    public void testUnparseableProperty() {
-        try {
-            minimalProps.put(DataSourceConfig.MAX_POOL_SIZE, "no int");
-            cfg.addDataSourceDefinition(minimalProps);
-            fail();
-        } catch (ConfigurationException e) {
-            // expected
-        }
-    }
-
-    public void testDuplicateLogicalName() throws ConfigurationException {
-        cfg.addDataSourceDefinition(minimalProps);
-        minimalProps2.put(DataSourceConfig.LOGICAL_NAME, "ds");
-        try {
-            cfg.addDataSourceDefinition(minimalProps2);
-            fail();
-        } catch (ConfigurationException e) {
-            // expected
-        }
-    }
-    
-    /**
-     * It only makes sense to configure driver, url, username, password and dbType for
-     * a DataSource which is to be obtained from JNDI.
-     * 
-     * @throws ConfigurationException
-     */
-    public void testConfiguredJNDIConfig() throws ConfigurationException {
-        minimalProps.put(DataSourceConfig.DRIVER, "javax.naming.InitialContext");
-        minimalProps.put(DataSourceConfig.MAX_POOL_SIZE, "10");
-        try {
-            cfg.addDataSourceDefinition(minimalProps);
-            fail();
-        } catch (ConfigurationException e) {
-            // expected
-        }
-    }
-}
Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/WorkspaceConfigTest.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/WorkspaceConfigTest.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/WorkspaceConfigTest.java	(.../trunk)	(revision 827970)
@@ -34,7 +34,7 @@
     protected void setUp() {
         Properties variables = new Properties();
         variables.setProperty("wsp.home", "target");
-        parser = new RepositoryConfigurationParser(variables, null);
+        parser = new RepositoryConfigurationParser(variables);
     }
 
     /**
Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/TestAll.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/TestAll.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/TestAll.java	(.../trunk)	(revision 827970)
@@ -39,7 +39,6 @@
 
         suite.addTestSuite(RepositoryConfigTest.class);
         suite.addTestSuite(WorkspaceConfigTest.class);
-        suite.addTestSuite(DataSourceConfigTest.class);
 
         return suite;
     }
Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/SecurityConfigTest.java
===================================================================
--- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/SecurityConfigTest.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/config/SecurityConfigTest.java	(.../trunk)	(revision 827970)
@@ -50,7 +50,7 @@
 
     protected void setUp() throws Exception {
         super.setUp();
-        parser = new RepositoryConfigurationParser(new Properties(), null);
+        parser = new RepositoryConfigurationParser(new Properties());
     }
 
     protected void tearDown() throws Exception {
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/db/SimpleDbPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/db/SimpleDbPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/db/SimpleDbPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,8 +16,7 @@
  */
 package org.apache.jackrabbit.core.persistence.db;
 
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionFactory;
 
 import java.sql.Connection;
 import java.sql.SQLException;
@@ -130,25 +129,13 @@
  * </pre>
  * See also {@link DerbyPersistenceManager}, {@link OraclePersistenceManager}.
  */
-public class SimpleDbPersistenceManager extends DatabasePersistenceManager implements DatabaseAware {
+public class SimpleDbPersistenceManager extends DatabasePersistenceManager {
 
     protected String driver;
     protected String url;
     protected String user;
     protected String password;
 
-    /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory;
-
-    /**
-     * {@inheritDoc}
-     */
-    public void setConnectionFactory(ConnectionFactory connnectionFactory) {
-        this.connectionFactory = connnectionFactory;
-    }
-
     //----------------------------------------------------< setters & getters >
     public String getUrl() {
         return url;
@@ -182,6 +169,8 @@
         this.driver = driver;
     }
 
+    //------------------------------------------< DatabasePersistenceManager >
+
     /**
      * Returns a JDBC connection acquired using the JDBC {@link DriverManager}.
      * @throws SQLException
@@ -191,7 +180,7 @@
      * @see DatabasePersistenceManager#getConnection()
      */
     protected Connection getConnection() throws RepositoryException, SQLException {
-        return connectionFactory.getDataSource(driver, url, user, password).getConnection();
+        return ConnectionFactory.getConnection(driver, url, user, password);
     }
 
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleDbPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleDbPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleDbPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,6 +16,7 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
+import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -24,27 +25,33 @@
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.sql.Blob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
 import javax.jcr.RepositoryException;
-import javax.sql.DataSource;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.id.NodeId;
+import org.apache.jackrabbit.core.id.PropertyId;
 import org.apache.jackrabbit.core.fs.FileSystem;
 import org.apache.jackrabbit.core.fs.FileSystemResource;
 import org.apache.jackrabbit.core.fs.local.LocalFileSystem;
-import org.apache.jackrabbit.core.id.NodeId;
-import org.apache.jackrabbit.core.id.PropertyId;
 import org.apache.jackrabbit.core.persistence.PMContext;
 import org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager;
 import org.apache.jackrabbit.core.persistence.bundle.util.DbNameIndex;
 import org.apache.jackrabbit.core.persistence.bundle.util.ErrorHandling;
 import org.apache.jackrabbit.core.persistence.bundle.util.NodePropBundle;
+import org.apache.jackrabbit.core.util.StringIndex;
 import org.apache.jackrabbit.core.persistence.util.BLOBStore;
 import org.apache.jackrabbit.core.persistence.util.FileSystemBLOBStore;
 import org.apache.jackrabbit.core.persistence.util.Serializer;
@@ -52,13 +59,7 @@
 import org.apache.jackrabbit.core.state.ItemStateException;
 import org.apache.jackrabbit.core.state.NoSuchItemStateException;
 import org.apache.jackrabbit.core.state.NodeReferences;
-import org.apache.jackrabbit.core.util.StringIndex;
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
-import org.apache.jackrabbit.core.util.db.DbUtility;
-import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.apache.jackrabbit.util.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,11 +84,15 @@
  * <li>&lt;param name="{@link #setSchemaCheckEnabled(String) schemaCheckEnabled}" value="true"/>
  * </ul>
  */
-public class BundleDbPersistenceManager extends AbstractBundlePersistenceManager implements DatabaseAware {
+public class BundleDbPersistenceManager extends AbstractBundlePersistenceManager {
 
     /** the default logger */
     private static Logger log = LoggerFactory.getLogger(BundleDbPersistenceManager.class);
 
+    /** the variable for the schema prefix */
+    public static final String SCHEMA_OBJECT_PREFIX_VARIABLE =
+            "${schemaObjectPrefix}";
+
     /** storage model modifier: binary keys */
     public static final int SM_BINARY_KEYS = 1;
 
@@ -112,12 +117,6 @@
     /** the database type */
     protected String databaseType;
 
-    /** the logical name of the data source to use */
-    protected String dataSourceName;
-
-    /** the {@link ConnectionHelper} set in the {@link #init(PMContext)} method */
-    protected ConnectionHelper conHelper;
-
     /** the prefix for the database objects */
     protected String schemaObjectPrefix;
 
@@ -136,6 +135,11 @@
     /** indicates whether to block if the database connection is lost */
     protected boolean blockOnConnectionLoss;
 
+    /**
+     * The class that manages statement execution and recovery from connection loss.
+     */
+    protected ConnectionRecoveryManager connectionManager;
+
     // SQL statements for bundle management
     protected String bundleInsertSQL;
     protected String bundleUpdateSQL;
@@ -182,19 +186,8 @@
      */
     private boolean schemaCheckEnabled = true;
 
-    /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory;
 
     /**
-     * {@inheritDoc}
-     */
-    public void setConnectionFactory(ConnectionFactory connnectionFactory) {
-        this.connectionFactory = connnectionFactory;
-    }
-
-    /**
      * Returns the configured JDBC connection url.
      * @return the configured JDBC connection url.
      */
@@ -329,14 +322,6 @@
         this.databaseType = databaseType;
     }
 
-    public String getDataSourceName() {
-        return dataSourceName;
-    }
-
-    public void setDataSourceName(String dataSourceName) {
-        this.dataSourceName = dataSourceName;
-    }
-
     /**
      * Returns if uses external (filesystem) blob store.
      * @return if uses external (filesystem) blob store.
@@ -463,36 +448,139 @@
     }
 
     /**
+     * Checks if the required schema objects exist and creates them if they
+     * don't exist yet.
+     *
+     * @throws SQLException if an SQL error occurs.
+     * @throws RepositoryException if an error occurs.
+     */
+    protected void checkSchema() throws SQLException, RepositoryException {
+        if (!checkTablesExist()) {
+            // read ddl from resources
+            InputStream in = BundleDbPersistenceManager.class.getResourceAsStream(databaseType + ".ddl");
+            if (in == null) {
+                String msg = "Configuration error: The resource '" + databaseType + ".ddl' could not be found";
+                log.debug(msg);
+                throw new RepositoryException(msg);
+            }
+            BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+            Statement stmt = connectionManager.getConnection().createStatement();
+            String sql = null;
+            try {
+                sql = reader.readLine();
+                while (sql != null) {
+                    if (!sql.startsWith("#") && sql.length() > 0
+                            && (sql.indexOf("BINVAL") < 0 || useDbBlobStore())) {
+                        // only create blob-related tables if db blob store is configured
+                        // execute sql stmt
+                        sql = createSchemaSQL(sql);
+                        stmt.executeUpdate(sql);
+                    }
+                    // read next sql stmt
+                    sql = reader.readLine();
+                }
+            } catch (IOException e) {
+                String msg = "Configuration error: unable to read the resource '" + databaseType + ".ddl': " + e;
+                log.debug(msg);
+                throw new RepositoryException(msg, e);
+            } catch (SQLException e) {
+                String msg = "Schema generation error: Issuing statement: " + sql;
+                SQLException se = new SQLException(msg);
+                se.initCause(e);
+                throw se;
+            } finally {
+                IOUtils.closeQuietly(in);
+                stmt.close();
+            }
+        }
+    }
+
+    /**
+     * Creates an SQL statement for schema creation by variable substitution.
+     *
+     * @param sql a SQL string which may contain variables to substitute
+     * @return a valid SQL string
+     */
+    protected String createSchemaSQL(String sql) {
+        // replace prefix variable
+        return Text.replace(sql, SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix).trim();
+    }
+
+    /**
+     * Checks if the database table exist.
+     *
+     * @return <code>true</code> if the tables exist;
+     *         <code>false</code> otherwise.
+     *
+     * @throws SQLException if a database error occurs.
+     * @throws RepositoryException if a repository exception occurs.
+     */
+    protected boolean checkTablesExist() throws SQLException, RepositoryException {
+        DatabaseMetaData metaData = connectionManager.getConnection().getMetaData();
+        String tableName = schemaObjectPrefix + "BUNDLE";
+        if (metaData.storesLowerCaseIdentifiers()) {
+            tableName = tableName.toLowerCase();
+        } else if (metaData.storesUpperCaseIdentifiers()) {
+            tableName = tableName.toUpperCase();
+        }
+        String userName = checkTablesWithUser() ? metaData.getUserName() : null;
+        ResultSet rs = metaData.getTables(null, userName, tableName, null);
+        try {
+            return rs.next();
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Indicates if the user name should be included when retrieving the tables
+     * during {@link #checkTablesExist()}.
+     * <p/>
+     * Please note that this currently only needs to be changed for Oracle-based
+     * persistence managers.
+     *
+     * @return <code>false</code>
+     */
+    protected boolean checkTablesWithUser() {
+        return false;
+    }
+
+    /**
      * {@inheritDoc}
      *
      * Basically wraps a JDBC transaction around super.store().
      */
     public synchronized void store(ChangeLog changeLog) throws ItemStateException {
-        int failures = 0;
-        ItemStateException lastException = null;
-        while (failures <= 1) { // retry once
+        int trials = 2;
+        Throwable lastException  = null;
+        do {
+            trials--;
+            Connection con = null;
             try {
-                conHelper.startBatch();
+                con = connectionManager.getConnection();
+                connectionManager.setAutoReconnect(false);
+                con.setAutoCommit(false);
                 super.store(changeLog);
-                conHelper.endBatch(true);
+                con.commit();
+                con.setAutoCommit(true);
                 return;
-            } catch (SQLException e) {
-                // Either startBatch or stopBatch threw it: either way the
-                // transaction was not persisted and no action needs to be taken.
-                failures++;
-                lastException = new ItemStateException(e.getMessage(), e);
-            } catch (ItemStateException e) {
-                // store call threw it: we need to cancel the transaction
-                failures++;
-                lastException = e;
+            } catch (Throwable th) {
+                lastException = th;
                 try {
-                    conHelper.endBatch(false);
-                } catch (SQLException e2) {
-                    DbUtility.logException("rollback failed", e2);
+                    if (con != null) {
+                        con.rollback();
+                    }
+                } catch (SQLException e) {
+                    logException("rollback failed", e);
                 }
+                if (th instanceof SQLException || th.getCause() instanceof SQLException) {
+                    connectionManager.close();
+                }
+            } finally {
+                connectionManager.setAutoReconnect(true);
             }
-        }
-        throw lastException;
+        } while(blockOnConnectionLoss || trials > 0);
+        throw new ItemStateException(lastException.getMessage(), lastException);
     }
 
     /**
@@ -504,16 +592,17 @@
         }
         super.init(context);
 
-        conHelper = createConnectionHelper(getDataSource());
-        
-        this.name = context.getHomeDir().getName();        
+        this.name = context.getHomeDir().getName();
 
+        connectionManager = new ConnectionRecoveryManager(blockOnConnectionLoss,
+                getDriver(), getUrl(), getUser(), getPassword());
+
         // make sure schemaObjectPrefix consists of legal name characters only
-        schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
+        prepareSchemaObjectPrefix();
 
         // check if schema objects exist and create them if necessary
         if (isSchemaCheckEnabled()) {
-            createCheckSchemaOperation().run();
+            checkSchema();
         }
 
         // create correct blob store
@@ -531,48 +620,9 @@
             // check all bundles
             checkConsistency(null, true, consistencyFix);
         }
-                
     }
 
-    private DataSource getDataSource() throws Exception {
-        if (getDataSourceName() == null || "".equals(getDataSourceName())) {
-            return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword());
-        } else {
-            String dbType = connectionFactory.getDataBaseType(dataSourceName);
-            if (BundleDbPersistenceManager.class.getResourceAsStream(dbType + ".ddl") != null) {
-                setDatabaseType(dbType);
-            }
-            return connectionFactory.getDataSource(dataSourceName);
-        }
-    }
-
     /**
-     * This method is called from the {@link #init(PMContext)} method of this class and returns a
-     * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may
-     * override it to return a specialized connection helper.
-     * 
-     * @param dataSrc the {@link DataSource} of this persistence manager
-     * @return a {@link ConnectionHelper}
-     * @throws Exception on error
-     */
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new ConnectionHelper(dataSrc, blockOnConnectionLoss);
-    }
-
-    /**
-     * This method is called from {@link #init(PMContext)} after the
-     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
-     * Subclasses can overrride this implementation to get a customized implementation.
-     * 
-     * @return a new {@link CheckSchemaOperation} instance
-     */
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        InputStream in = BundleDbPersistenceManager.class.getResourceAsStream(databaseType + ".ddl");
-        return new CheckSchemaOperation(conHelper, in, schemaObjectPrefix + "BUNDLE").addVariableReplacement(
-            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
-    }
-
-    /**
      * {@inheritDoc}
      */
     protected BundleBinding getBinding() {
@@ -623,7 +673,7 @@
      * @throws SQLException if an SQL error occurs.
      */
     protected DbNameIndex createDbNameIndex() throws SQLException {
-        return new DbNameIndex(conHelper, schemaObjectPrefix);
+        return new DbNameIndex(connectionManager, schemaObjectPrefix);
     }
 
     /**
@@ -740,33 +790,40 @@
         }
     }
 
+    /**
+     * {@inheritDoc}
+     */
     public void checkConsistency(String[] uuids, boolean recursive, boolean fix) {
+        log.info("{}: checking workspace consistency...", name);
+
         int count = 0;
         int total = 0;
-        Collection<NodePropBundle> modifications = new ArrayList<NodePropBundle>();        
-        
+        Collection<NodePropBundle> modifications = new ArrayList<NodePropBundle>();
+
         if (uuids == null) {
             // get all node bundles in the database with a single sql statement,
-            // which is (probably) faster than loading each bundle and traversing the tree        	
+            // which is (probably) faster than loading each bundle and traversing the tree
             ResultSet rs = null;
-            try {            	
+            try {
                 String sql = "select count(*) from " + schemaObjectPrefix + "BUNDLE";
-                rs = conHelper.exec(sql, new Object[0], false, 0);
+                Statement stmt = connectionManager.executeStmt(sql, new Object[0]);
                 try {
+                    rs = stmt.getResultSet();
                     if (!rs.next()) {
                         log.error("Could not retrieve total number of bundles. empty result set.");
                         return;
                     }
                     total = rs.getInt(1);
                 } finally {
-                    DbUtility.close(rs);
+                    closeResultSet(rs);
                 }
                 if (getStorageModel() == SM_BINARY_KEYS) {
                     sql = "select NODE_ID from " + schemaObjectPrefix + "BUNDLE";
                 } else {
                     sql = "select NODE_ID_HI, NODE_ID_LO from " + schemaObjectPrefix + "BUNDLE";
                 }
-                rs = conHelper.exec(sql, new Object[0], false, 0);
+                stmt = connectionManager.executeStmt(sql, new Object[0]);
+                rs = stmt.getResultSet();
 
                 // iterate over all node bundles in the db
                 while (rs.next()) {
@@ -781,14 +838,15 @@
                     ResultSet bRs = null;
                     byte[] data = null;
                     try {
-                        bRs = conHelper.exec(bundleSelectSQL, getKey(id), false, 0);
+                        Statement bSmt = connectionManager.executeStmt(bundleSelectSQL, getKey(id));
+                        bRs = bSmt.getResultSet();
                         if (!bRs.next()) {
                             throw new SQLException("bundle cannot be retrieved?");
                         }
                         Blob blob = bRs.getBlob(1);
                         data = getBytes(blob);
                     } finally {
-                        DbUtility.close(bRs);
+                        closeResultSet(bRs);
                     }
 
 
@@ -814,8 +872,8 @@
                 }
             } catch (Exception e) {
                 log.error("Error loading bundle", e);
-            } finally {            	
-                DbUtility.close(rs);
+            } finally {
+                closeResultSet(rs);
                 total = count;
             }
         } else {
@@ -836,7 +894,7 @@
                     log.error("Invalid uuid for consistency check, skipping: '" + uuids[i] + "': " + e);
                 }
             }
-            
+
             // iterate over UUIDs (including ones that are newly added inside the loop!)
             for (int i = 0; i < idList.size(); i++) {
                 NodeId id = idList.get(i);
@@ -888,6 +946,35 @@
     }
 
     /**
+     * Makes sure that <code>schemaObjectPrefix</code> consists only of
+     * characters that are allowed in names on the target database. Illegal
+     * characters will be escaped as necessary.
+     *
+     * @throws Exception if an error occurs
+     */
+    protected void prepareSchemaObjectPrefix() throws Exception {
+        DatabaseMetaData metaData = connectionManager.getConnection().getMetaData();
+        String legalChars = metaData.getExtraNameCharacters();
+        legalChars += "ABCDEFGHIJKLMNOPQRSTUVWXZY0123456789_";
+
+        String prefix = schemaObjectPrefix.toUpperCase();
+        StringBuffer escaped = new StringBuffer();
+        for (int i = 0; i < prefix.length(); i++) {
+            char c = prefix.charAt(i);
+            if (legalChars.indexOf(c) == -1) {
+                escaped.append("_x");
+                String hex = Integer.toHexString(c);
+                escaped.append("0000".toCharArray(), 0, 4 - hex.length());
+                escaped.append(hex);
+                escaped.append("_");
+            } else {
+                escaped.append(c);
+            }
+        }
+        schemaObjectPrefix = escaped.toString();
+    }
+
+    /**
      * {@inheritDoc}
      */
     public synchronized void close() throws Exception {
@@ -898,7 +985,8 @@
         try {
             if (nameIndex instanceof DbNameIndex) {
                 ((DbNameIndex) nameIndex).close();
-            }            
+            }
+            connectionManager.close();
             // close blob store
             blobStore.close();
             blobStore = null;
@@ -979,7 +1067,8 @@
                 // see also bundleSelectAllIdsFrom SQL statement
                 maxCount += 10;
             }
-            rs = conHelper.exec(sql, keys, false, maxCount);
+            Statement stmt = connectionManager.executeStmt(sql, keys, false, maxCount);
+            rs = stmt.getResultSet();
             ArrayList<NodeId> result = new ArrayList<NodeId>();
             while ((maxCount == 0 || result.size() < maxCount) && rs.next()) {
                 NodeId current;
@@ -1004,7 +1093,7 @@
             log.error(msg, e);
             throw new ItemStateException(msg, e);
         } finally {
-            DbUtility.close(rs);
+            closeResultSet(rs);
         }
     }
 
@@ -1054,9 +1143,9 @@
     protected synchronized NodePropBundle loadBundle(NodeId id, boolean checkBeforeLoading)
             throws ItemStateException {
         ResultSet rs = null;
-        
         try {
-            rs = conHelper.exec(bundleSelectSQL, getKey(id), false, 0);
+            Statement stmt = connectionManager.executeStmt(bundleSelectSQL, getKey(id));
+            rs = stmt.getResultSet();
             if (!rs.next()) {
                 return null;
             }
@@ -1082,7 +1171,7 @@
             log.error(msg);
             throw new ItemStateException(msg, e);
         } finally {
-            DbUtility.close(rs);
+            closeResultSet(rs);
         }
     }
 
@@ -1092,7 +1181,8 @@
     protected synchronized boolean existsBundle(NodeId id) throws ItemStateException {
         ResultSet rs = null;
         try {
-            rs = conHelper.exec(bundleSelectSQL, getKey(id), false, 0);
+            Statement stmt = connectionManager.executeStmt(bundleSelectSQL, getKey(id));
+            rs = stmt.getResultSet();
             // a bundle exists, if the result has at least one entry
             return rs.next();
         } catch (Exception e) {
@@ -1100,7 +1190,7 @@
             log.error(msg, e);
             throw new ItemStateException(msg, e);
         } finally {
-            DbUtility.close(rs);
+            closeResultSet(rs);
         }
     }
 
@@ -1116,7 +1206,7 @@
 
             String sql = bundle.isNew() ? bundleInsertSQL : bundleUpdateSQL;
             Object[] params = createParams(bundle.getId(), out.toByteArray(), true);
-            conHelper.update(sql, params);
+            connectionManager.executeStmt(sql, params);
         } catch (Exception e) {
             String msg = "failed to write bundle: " + bundle.getId();
             log.error(msg, e);
@@ -1129,7 +1219,7 @@
      */
     protected synchronized void destroyBundle(NodePropBundle bundle) throws ItemStateException {
         try {
-            conHelper.update(bundleDeleteSQL, getKey(bundle.getId()));
+            connectionManager.executeStmt(bundleDeleteSQL, getKey(bundle.getId()));
         } catch (Exception e) {
             if (e instanceof NoSuchItemStateException) {
                 throw (NoSuchItemStateException) e;
@@ -1152,7 +1242,9 @@
         ResultSet rs = null;
         InputStream in = null;
         try {
-            rs = conHelper.exec(nodeReferenceSelectSQL, getKey(targetId), false, 0);
+            Statement stmt = connectionManager.executeStmt(
+                    nodeReferenceSelectSQL, getKey(targetId));
+            rs = stmt.getResultSet();
             if (!rs.next()) {
                 throw new NoSuchItemStateException(targetId.toString());
             }
@@ -1171,7 +1263,7 @@
             throw new ItemStateException(msg, e);
         } finally {
             IOUtils.closeQuietly(in);
-            DbUtility.close(rs);
+            closeResultSet(rs);
         }
     }
 
@@ -1200,8 +1292,8 @@
             Serializer.serialize(refs, out);
 
             Object[] params = createParams(refs.getTargetId(), out.toByteArray(), true);
-            conHelper.exec(sql, params);
-            
+            connectionManager.executeStmt(sql, params);
+
             // there's no need to close a ByteArrayOutputStream
             //out.close();
         } catch (Exception e) {
@@ -1220,7 +1312,8 @@
         }
 
         try {
-            conHelper.exec(nodeReferenceDeleteSQL, getKey(refs.getTargetId()));
+            connectionManager.executeStmt(nodeReferenceDeleteSQL,
+                    getKey(refs.getTargetId()));
         } catch (Exception e) {
             if (e instanceof NoSuchItemStateException) {
                 throw (NoSuchItemStateException) e;
@@ -1241,7 +1334,9 @@
 
         ResultSet rs = null;
         try {
-            rs = conHelper.exec(nodeReferenceSelectSQL, getKey(targetId), false, 0);
+            Statement stmt = connectionManager.executeStmt(
+                    nodeReferenceSelectSQL, getKey(targetId));
+            rs = stmt.getResultSet();
 
             // a reference exists if the result has at least one entry
             return rs.next();
@@ -1251,11 +1346,71 @@
             log.error(msg, e);
             throw new ItemStateException(msg, e);
         } finally {
-            DbUtility.close(rs);
+            closeResultSet(rs);
         }
     }
 
     /**
+     * Resets the given <code>PreparedStatement</code> by clearing the
+     * parameters and warnings contained.
+     *
+     * @param stmt The <code>PreparedStatement</code> to reset. If
+     *             <code>null</code> this method does nothing.
+     */
+    protected synchronized void resetStatement(PreparedStatement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.clearParameters();
+                stmt.clearWarnings();
+            } catch (SQLException se) {
+                logException("Failed resetting PreparedStatement", se);
+            }
+        }
+    }
+
+    /**
+     * Closes the result set
+     * @param rs the result set
+     */
+    protected void closeResultSet(ResultSet rs) {
+        if (rs != null) {
+            try {
+                rs.close();
+            } catch (SQLException se) {
+                logException("Failed closing ResultSet", se);
+            }
+        }
+    }
+
+    /**
+     * Closes the statement.
+     * @param stmt the statement
+     */
+    protected void closeStatement(PreparedStatement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.close();
+            } catch (SQLException se) {
+                logException("Failed closing PreparedStatement", se);
+            }
+        }
+    }
+
+    /**
+     * Logs an SQL exception.
+     * @param message the message
+     * @param e the exception
+     */
+    protected void logException(String message, SQLException e) {
+        if (message != null) {
+            log.error(message);
+        }
+        log.error("       Reason: " + e.getMessage());
+        log.error("   State/Code: " + e.getSQLState() + "/" + e.getErrorCode());
+        log.debug("   dump:", e);
+    }
+
+    /**
      * @inheritDoc
      */
     public String toString() {
@@ -1378,40 +1533,31 @@
          * {@inheritDoc}
          */
         public InputStream get(String blobId) throws Exception {
-            ResultSet rs = null;
-            boolean close = true;
-            try {
-                rs = conHelper.exec(blobSelectSQL, new Object[]{blobId}, false, 0);
-                if (!rs.next()) {
-                    throw new Exception("no such BLOB: " + blobId);
-                }
+            Statement stmt = connectionManager.executeStmt(blobSelectSQL, new Object[]{blobId});
+            final ResultSet rs = stmt.getResultSet();
+            if (!rs.next()) {
+                closeResultSet(rs);
+                throw new Exception("no such BLOB: " + blobId);
+            }
+            InputStream in = rs.getBinaryStream(1);
+            if (in == null) {
+                // some databases treat zero-length values as NULL;
+                // return empty InputStream in such a case
+                closeResultSet(rs);
+                return new ByteArrayInputStream(new byte[0]);
+            }
 
-                InputStream in = rs.getBinaryStream(1);
-                if (in == null) {
-                    // some databases treat zero-length values as NULL;
-                    // return empty InputStream in such a case
-                    return new ByteArrayInputStream(new byte[0]);
+            /**
+             * return an InputStream wrapper in order to
+             * close the ResultSet when the stream is closed
+             */
+            return new FilterInputStream(in) {
+                public void close() throws IOException {
+                    in.close();
+                    // now it's safe to close ResultSet
+                    closeResultSet(rs);
                 }
-
-                 // return an InputStream wrapper in order to close the ResultSet when the stream is closed
-                close = false;
-                final ResultSet rs2 = rs;
-                return new FilterInputStream(in) {
-
-                    public void close() throws IOException {
-                        try {
-                            in.close();
-                        } finally {
-                            // now it's safe to close ResultSet
-                            DbUtility.close(rs2);
-                        }
-                    }
-                };
-            } finally {
-                if (close) {
-                    DbUtility.close(rs);
-                }
-            }
+            };
         }
 
         /**
@@ -1419,25 +1565,23 @@
          */
         public synchronized void put(String blobId, InputStream in, long size)
                 throws Exception {
-            ResultSet rs = null;
-            boolean exists;
-        	try {
-	            rs = conHelper.exec(blobSelectExistSQL, new Object[]{blobId}, false, 0);
-	            // a BLOB exists if the result has at least one entry
-	            exists = rs.next();
-        	} finally {
-	            DbUtility.close(rs);
-        	}
+            Statement stmt = connectionManager.executeStmt(blobSelectExistSQL, new Object[]{blobId});
+            ResultSet rs = stmt.getResultSet();
+            // a BLOB exists if the result has at least one entry
+            boolean exists = rs.next();
+            closeResultSet(rs);
+
             String sql = (exists) ? blobUpdateSQL : blobInsertSQL;
-            Object[] params = new Object[]{new StreamWrapper(in, size), blobId};
-            conHelper.exec(sql, params);
+            Object[] params = new Object[]{new ConnectionRecoveryManager.StreamWrapper(in, size), blobId};
+            connectionManager.executeStmt(sql, params);
         }
 
         /**
          * {@inheritDoc}
          */
         public synchronized boolean remove(String blobId) throws Exception {
-            return conHelper.update(blobDeleteSQL, new Object[]{blobId}) == 1;
+            Statement stmt = connectionManager.executeStmt(blobDeleteSQL, new Object[]{blobId});
+            return stmt.getUpdateCount() == 1;
         }
 
         public void close() {
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/MSSqlPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/MSSqlPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/MSSqlPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,10 +16,12 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.util.Text;
 
 /**
- * Extends the {@link BundleDbPersistenceManager} by MS-SQL specific code. <p/> Configuration:<br>
+ * Extends the {@link BundleDbPersistenceManager} by MS-SQL specific code.
+ * <p/>
+ * Configuration:<br>
  * <ul>
  * <li>&lt;param name="{@link #setBundleCacheSize(String) bundleCacheSize}" value="8"/>
  * <li>&lt;param name="{@link #setConsistencyCheck(String) consistencyCheck}" value="false"/>
@@ -44,18 +46,14 @@
         setDatabaseType("mssql");
     }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    protected String createSchemaSQL(String sql) {
+        return Text.replace(
+                super.createSchemaSQL(sql), "${tableSpace}", tableSpace);
     }
 
     /**
      * Returns the configured MS SQL table space.
-     * 
+     *
      * @return the configured MS SQL table space.
      */
     public String getTableSpace() {
@@ -64,11 +62,11 @@
 
     /**
      * Sets the MS SQL table space.
-     * 
+     *
      * @param tableSpace the MS SQL table space.
      */
     public void setTableSpace(String tableSpace) {
-        if (tableSpace != null && tableSpace.trim().length() > 0) {
+        if (tableSpace != null && tableSpace.length() > 0) {
             this.tableSpace = "on " + tableSpace.trim();
         } else {
             this.tableSpace = "";
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleFsPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleFsPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/BundleFsPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -100,6 +100,7 @@
      */
     private String name = super.toString();
 
+
     /**
      * Returns the configured block size of the blob cqfs
      * @return the block size.
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/PostgreSQLPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/PostgreSQLPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/PostgreSQLPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,11 +16,6 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
-import java.io.DataInputStream;
-import java.io.InputStream;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
 import org.apache.jackrabbit.core.id.NodeId;
 import org.apache.jackrabbit.core.persistence.PMContext;
 import org.apache.jackrabbit.core.persistence.bundle.util.DbNameIndex;
@@ -28,10 +23,15 @@
 import org.apache.jackrabbit.core.persistence.bundle.util.PostgreSQLNameIndex;
 import org.apache.jackrabbit.core.persistence.bundle.util.TrackingInputStream;
 import org.apache.jackrabbit.core.state.ItemStateException;
-import org.apache.jackrabbit.core.util.db.DbUtility;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.DataInputStream;
+import java.io.InputStream;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
 /**
  * Extends the {@link BundleDbPersistenceManager} by PostgreSQL specific code.
  * <p/>
@@ -77,7 +77,7 @@
      * @throws java.sql.SQLException if an SQL error occurs.
      */
     protected DbNameIndex createDbNameIndex() throws SQLException {
-        return new PostgreSQLNameIndex(conHelper, schemaObjectPrefix);
+        return new PostgreSQLNameIndex(connectionManager, schemaObjectPrefix);
     }
 
     /**
@@ -88,37 +88,34 @@
         return SM_LONGLONG_KEYS;
     }
 
-    /**
-     * PostgreSQL needs slightly different handling of the binary value that is received:
-     * rs.getBinaryStream vs rs.getBlob in the super class.
-     * 
-     * {@inheritDoc}
-     */
     protected synchronized NodePropBundle loadBundle(NodeId id)
             throws ItemStateException {
-        ResultSet rs = null;
-        try {        	
-            rs = conHelper.exec(bundleSelectSQL, getKey(id), false, 0);
-            if (rs.next()) {
-                InputStream input = rs.getBinaryStream(1);
-                try {
-                    TrackingInputStream cin = new TrackingInputStream(input);
-                    DataInputStream din = new DataInputStream(cin);
-                    NodePropBundle bundle = binding.readBundle(din, id);
-                    bundle.setSize(cin.getPosition());
-                    return bundle;
-                } finally {
-                    input.close();
+        try {
+            Statement stmt = connectionManager.executeStmt(bundleSelectSQL, getKey(id));
+            ResultSet rs = stmt.getResultSet();
+            try {
+                if (rs.next()) {
+                    InputStream input = rs.getBinaryStream(1);
+                    try {
+                        TrackingInputStream cin = new TrackingInputStream(input);
+                        DataInputStream din = new DataInputStream(cin);
+                        NodePropBundle bundle = binding.readBundle(din, id);
+                        bundle.setSize(cin.getPosition());
+                        return bundle;
+                    } finally {
+                        input.close();
+                    }
+                } else {
+                    return null;
                 }
-            } else {
-                return null;
+            } finally {
+                rs.close();
             }
         } catch (Exception e) {
             String msg = "failed to read bundle: " + id + ": " + e;
             log.error(msg);
             throw new ItemStateException(msg, e);
-        } finally {
-           DbUtility.close(rs);
         }
     }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/DerbyPersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/DerbyPersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/DerbyPersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,14 +16,15 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.jackrabbit.core.persistence.PMContext;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
 
-import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.SQLException;
+import java.sql.Statement;
 
-import javax.sql.DataSource;
+import javax.jcr.RepositoryException;
 
 /**
  * Extends the {@link BundleDbPersistenceManager} by derby specific code.
@@ -53,6 +54,9 @@
     /** name of the embedded driver */
     public static final String DERBY_EMBEDDED_DRIVER = "org.apache.derby.jdbc.EmbeddedDriver";
 
+    /** the default logger */
+    private static Logger log = LoggerFactory.getLogger(DerbyPersistenceManager.class);
+
     /** @see #setDerbyStorageInitialPages(String) */
     private int derbyStorageInitialPages = 16;
 
@@ -248,29 +252,35 @@
             setSchemaObjectPrefix("");
         }
         super.init(context);
-        // set properties       
-        if (DERBY_EMBEDDED_DRIVER.equals(getDriver())) {
-            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
-                    + "('derby.storage.initialPages', '" + derbyStorageInitialPages + "')");
-            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
-                    + "('derby.storage.minimumRecordSize', '" + derbyStorageMinimumRecordSize + "')");
-            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
-                    + "('derby.storage.pageCacheSize', '" + derbyStoragePageCacheSize + "')");
-            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
-                    + "('derby.storage.pageReservedSpace', '" + derbyStoragePageReservedSpace + "')");
-            conHelper.exec("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY " + "('derby.storage.pageSize', '"
-                    + derbyStoragePageSize + "')");
-        }
     }
 
     /**
      * {@inheritDoc}
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) {
-        return new DerbyConnectionHelper(dataSrc, blockOnConnectionLoss);
+    protected void checkSchema() throws SQLException, RepositoryException {
+        // set properties
+        if (DERBY_EMBEDDED_DRIVER.equals(getDriver())) {
+            Statement stmt = connectionManager.getConnection().createStatement();
+            try {
+                stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                        + "('derby.storage.initialPages', '" + derbyStorageInitialPages + "')");
+                stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                        + "('derby.storage.minimumRecordSize', '" + derbyStorageMinimumRecordSize + "')");
+                stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                        + "('derby.storage.pageCacheSize', '" + derbyStoragePageCacheSize + "')");
+                stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                        + "('derby.storage.pageReservedSpace', '" + derbyStoragePageReservedSpace + "')");
+                stmt.execute("CALL SYSCS_UTIL.SYSCS_SET_DATABASE_PROPERTY "
+                        + "('derby.storage.pageSize', '" + derbyStoragePageSize + "')");
+
+            } finally {
+                stmt.close();
+            }
+        }
+        super.checkSchema();
     }
 
+
     /**
      * {@inheritDoc}
      *
@@ -290,8 +300,35 @@
      * @see DatabasePersistenceManager#closeConnection(Connection)
      */
     public void close() throws Exception {
+        // check for embedded driver
+        if (!DERBY_EMBEDDED_DRIVER.equals(getDriver())) {
+            return;
+        }
+
+        // prepare connection url for issuing shutdown command
+        String url = connectionManager.getConnection().getMetaData().getURL();
+        int pos = url.lastIndexOf(';');
+        if (pos != -1) {
+            // strip any attributes from connection url
+            url = url.substring(0, pos);
+        }
+        url += ";shutdown=true";
+
+        // we have to reset the connection to 'autoCommit=true' before closing it;
+        // otherwise Derby would mysteriously complain about some pending uncommitted
+        // changes which can't possibly be true.
+        // @todo further investigate
+        connectionManager.getConnection().setAutoCommit(true);
+
         super.close();
-        ((DerbyConnectionHelper) conHelper).shutDown(getDriver());
+
+        // now it's safe to shutdown the embedded Derby database
+        try {
+            DriverManager.getConnection(url);
+        } catch (SQLException e) {
+            // a shutdown command always raises a SQLException
+            log.info(e.getMessage());
+        }
     }
 
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/OraclePersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/OraclePersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/OraclePersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,19 +16,20 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
+import java.sql.DatabaseMetaData;
 import java.sql.SQLException;
 
-import javax.sql.DataSource;
-
 import org.apache.jackrabbit.core.persistence.PMContext;
 import org.apache.jackrabbit.core.persistence.bundle.util.DbNameIndex;
 import org.apache.jackrabbit.core.persistence.bundle.util.NGKDbNameIndex;
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
+import org.apache.jackrabbit.util.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * Extends the {@link BundleDbPersistenceManager} by Oracle specific code. <p/> Configuration:<br>
+ * Extends the {@link BundleDbPersistenceManager} by Oracle specific code.
+ * <p/>
+ * Configuration:<br>
  * <ul>
  * <li>&lt;param name="{@link #setExternalBLOBs(String)} externalBLOBs}" value="false"/>
  * <li>&lt;param name="{@link #setBundleCacheSize(String) bundleCacheSize}" value="8"/>
@@ -46,8 +47,17 @@
  */
 public class OraclePersistenceManager extends BundleDbPersistenceManager {
 
+    /**
+     * the default logger
+     */
+    private static Logger log = LoggerFactory.getLogger(OraclePersistenceManager.class);
+
+    /** the variable for the Oracle table space */
+    public static final String TABLE_SPACE_VARIABLE =
+        "${tableSpace}";
+
     /** the Oracle table space to use */
-    protected String tableSpace = "";
+    protected String tableSpace;
 
     /**
      * Creates a new oracle persistence manager
@@ -59,7 +69,6 @@
 
     /**
      * Returns the configured Oracle table space.
-     * 
      * @return the configured Oracle table space.
      */
     public String getTableSpace() {
@@ -68,14 +77,13 @@
 
     /**
      * Sets the Oracle table space.
-     * 
      * @param tableSpace the Oracle table space.
      */
     public void setTableSpace(String tableSpace) {
-        if (tableSpace != null && tableSpace.trim().length() > 0) {
-            this.tableSpace = "tablespace " + tableSpace.trim();
+        if (tableSpace != null) {
+            this.tableSpace = tableSpace.trim();
         } else {
-            this.tableSpace = "";
+            this.tableSpace = null;
         }
     }
 
@@ -94,34 +102,77 @@
             setSchemaObjectPrefix(context.getHomeDir().getName() + "_");
         }
         super.init(context);
+
+        // check driver version
+        try {
+            DatabaseMetaData metaData = connectionManager.getConnection().getMetaData();
+            if (metaData.getDriverMajorVersion() < 10) {
+                // Oracle drivers prior to version 10 only support
+                // writing BLOBs up to 32k in size...
+                log.warn("Unsupported driver version detected: "
+                        + metaData.getDriverName()
+                        + " v" + metaData.getDriverVersion());
+            }
+        } catch (SQLException e) {
+            log.warn("Can not retrieve driver version", e);
+        }
     }
 
     /**
      * Returns a new instance of a NGKDbNameIndex.
-     * 
      * @return a new instance of a NGKDbNameIndex.
      * @throws SQLException if an SQL error occurs.
      */
     protected DbNameIndex createDbNameIndex() throws SQLException {
-        return new NGKDbNameIndex(conHelper, schemaObjectPrefix);
+        return new NGKDbNameIndex(connectionManager, schemaObjectPrefix);
     }
 
     /**
      * {@inheritDoc}
+     *
+     * @return <code>true</code>
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        OracleConnectionHelper helper = new OracleConnectionHelper(dataSrc, blockOnConnectionLoss);
-        helper.init();
-        return helper;
+    protected boolean checkTablesWithUser() {
+        return true;
     }
 
     /**
      * {@inheritDoc}
      */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    protected String createSchemaSQL(String sql) {
+        sql = Text.replace(sql, SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix).trim();
+        // set the tablespace if it is defined
+        String tspace;
+        if (tableSpace == null || "".equals(tableSpace)) {
+            tspace = "";
+        } else {
+            tspace = "tablespace " + tableSpace;
+        }
+        return Text.replace(sql, TABLE_SPACE_VARIABLE, tspace).trim();
     }
+
+    /**
+     * Since Oracle only supports table names up to 30 characters in
+     * length, illegal characters are simply replaced with "_" rather than
+     * escaping them with "_x0000_".
+     *
+     * @inheritDoc
+     */
+    protected void prepareSchemaObjectPrefix() throws Exception {
+        DatabaseMetaData metaData = connectionManager.getConnection().getMetaData();
+        String legalChars = metaData.getExtraNameCharacters();
+        legalChars += "ABCDEFGHIJKLMNOPQRSTUVWXZY0123456789_";
+
+        String prefix = schemaObjectPrefix.toUpperCase();
+        StringBuffer escaped = new StringBuffer();
+        for (int i = 0; i < prefix.length(); i++) {
+            char c = prefix.charAt(i);
+            if (legalChars.indexOf(c) == -1) {
+                escaped.append('_');
+            } else {
+                escaped.append(c);
+            }
+        }
+        schemaObjectPrefix = escaped.toString();
+    }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/H2PersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/H2PersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/H2PersistenceManager.java	(.../trunk)	(revision 827970)
@@ -18,6 +18,11 @@
 
 import org.apache.jackrabbit.core.persistence.PMContext;
 
+import java.sql.Statement;
+import java.sql.SQLException;
+
+import javax.jcr.RepositoryException;
+
 /**
  * Extends the {@link BundleDbPersistenceManager} by H2 specific code.
  * <p/>
@@ -60,6 +65,12 @@
     }
 
     /**
+     * Creates a new H2 persistence manager.
+     */
+    public H2PersistenceManager() {
+    }
+
+    /**
      * {@inheritDoc}
      */
    public void init(PMContext context) throws Exception {
@@ -78,8 +89,19 @@
         }
 
         super.init(context);
-        
-        conHelper.exec("SET LOCK_TIMEOUT " + lockTimeout);
     }
 
+    /**
+     * {@inheritDoc}
+     */
+    protected void checkSchema() throws SQLException, RepositoryException {
+        Statement stmt = connectionManager.getConnection().createStatement();
+        try {
+            stmt.execute("SET LOCK_TIMEOUT " + lockTimeout);
+        } finally {
+            stmt.close();
+        }
+        super.checkSchema();
+    }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/AbstractBundlePersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/AbstractBundlePersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/AbstractBundlePersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,40 +16,40 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-
-import javax.jcr.PropertyType;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.jackrabbit.core.fs.FileSystemResource;
 import org.apache.jackrabbit.core.fs.FileSystem;
-import org.apache.jackrabbit.core.NamespaceRegistryImpl;
+import org.apache.jackrabbit.core.state.ItemState;
+import org.apache.jackrabbit.core.state.ChangeLog;
+import org.apache.jackrabbit.core.state.ItemStateException;
+import org.apache.jackrabbit.core.state.NodeReferences;
+import org.apache.jackrabbit.core.state.NoSuchItemStateException;
+import org.apache.jackrabbit.core.state.PropertyState;
+import org.apache.jackrabbit.core.state.NodeState;
 import org.apache.jackrabbit.core.id.ItemId;
 import org.apache.jackrabbit.core.id.NodeId;
 import org.apache.jackrabbit.core.id.PropertyId;
+import org.apache.jackrabbit.core.NamespaceRegistryImpl;
+import org.apache.jackrabbit.core.value.InternalValue;
 import org.apache.jackrabbit.core.persistence.IterablePersistenceManager;
 import org.apache.jackrabbit.core.persistence.PMContext;
 import org.apache.jackrabbit.core.persistence.PersistenceManager;
-import org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding;
+import org.apache.jackrabbit.core.util.StringIndex;
+import org.apache.jackrabbit.core.persistence.bundle.util.NodePropBundle;
 import org.apache.jackrabbit.core.persistence.bundle.util.BundleCache;
+import org.apache.jackrabbit.core.persistence.bundle.util.LRUNodeIdCache;
 import org.apache.jackrabbit.core.persistence.bundle.util.HashMapIndex;
-import org.apache.jackrabbit.core.persistence.bundle.util.LRUNodeIdCache;
-import org.apache.jackrabbit.core.persistence.bundle.util.NodePropBundle;
-import org.apache.jackrabbit.core.state.ItemState;
-import org.apache.jackrabbit.core.state.ChangeLog;
-import org.apache.jackrabbit.core.state.ItemStateException;
-import org.apache.jackrabbit.core.state.NodeReferences;
-import org.apache.jackrabbit.core.state.NoSuchItemStateException;
-import org.apache.jackrabbit.core.state.PropertyState;
-import org.apache.jackrabbit.core.state.NodeState;
-import org.apache.jackrabbit.core.util.StringIndex;
-import org.apache.jackrabbit.core.value.InternalValue;
+import org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding;
 import org.apache.jackrabbit.spi.Name;
 import org.apache.jackrabbit.spi.commons.name.NameConstants;
 
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.jcr.PropertyType;
+
 /**
  * The <code>AbstractBundlePersistenceManager</code> acts as base for all
  * persistence managers that store the state in a {@link NodePropBundle}.
@@ -131,7 +131,7 @@
     public void setBundleCacheSize(String bundleCacheSize) {
         this.bundleCacheSize = Long.parseLong(bundleCacheSize) * 1024 * 1024;
     }
-    
+
     /**
      * Creates the folder path for the given node id that is suitable for
      * storing states in a filesystem.
@@ -405,7 +405,8 @@
      *
      * Loads the state via the appropriate NodePropBundle.
      */
-    public synchronized NodeState load(NodeId id) throws NoSuchItemStateException, ItemStateException {
+    public synchronized NodeState load(NodeId id)
+            throws NoSuchItemStateException, ItemStateException {
         NodePropBundle bundle = getBundle(id);
         if (bundle == null) {
             throw new NoSuchItemStateException(id.toString());
@@ -418,7 +419,8 @@
      *
      * Loads the state via the appropriate NodePropBundle.
      */
-    public synchronized PropertyState load(PropertyId id) throws NoSuchItemStateException, ItemStateException {
+    public synchronized PropertyState load(PropertyId id)
+            throws NoSuchItemStateException, ItemStateException {
         NodePropBundle bundle = getBundle(id.getParentId());
         if (bundle == null) {
             throw new NoSuchItemStateException(id.toString());
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionFactory.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionFactory.java	(.../sandbox/JCR-1456)	(revision 0)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionFactory.java	(.../trunk)	(revision 827970)
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.persistence.bundle.util;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+import javax.jcr.RepositoryException;
+import javax.naming.Context;
+import javax.naming.NamingException;
+import javax.sql.DataSource;
+
+/**
+ * A factory for new database connections.
+ * Supported are regular JDBC drivers, as well as
+ * JNDI resources.
+ */
+public class ConnectionFactory {
+
+    /**
+     * Utility classes should not have a public or default constructor.
+     */
+    private ConnectionFactory() {
+    }
+
+    /**
+     * Open a connection using the specified properties.
+     * The connection can be created using a JNDI Data Source as well. To do that,
+     * the driver class name must reference a javax.naming.Context class
+     * (for example javax.naming.InitialContext), and the URL must be the JNDI URL
+     * (for example java:comp/env/jdbc/Test).
+     *
+     * @param driver the JDBC driver or the Context class
+     * @param url the database URL
+     * @param user the user name
+     * @param password the password
+     * @return the connection
+     * @throws RepositoryException if the driver could not be loaded
+     * @throws SQLException if the connection could not be established
+     */
+    public static Connection getConnection(String driver, String url,
+            String user, String password) throws RepositoryException,
+            SQLException {
+        if (driver != null && driver.length() > 0) {
+            try {
+                Class< ? > d = Class.forName(driver);
+                if (javax.naming.Context.class.isAssignableFrom(d)) {
+                    // JNDI context
+                    Context context = (Context) d.newInstance();
+                    DataSource ds = (DataSource) context.lookup(url);
+                    if (user == null && password == null) {
+                        return ds.getConnection();
+                    } else {
+                        return ds.getConnection(user, password);
+                    }
+                } else {
+                    try {
+                        // Workaround for Apache Derby:
+                        // The JDBC specification recommends the Class.forName method without the .newInstance() method call,
+                        // but it is required after a Derby 'shutdown'.
+                        d.newInstance();
+                    } catch (Throwable e) {
+                        // Ignore exceptions
+                        // There's no requirement that a JDBC driver class has a public default constructor
+                    }
+                }
+            } catch (ClassNotFoundException e) {
+                throw new RepositoryException("Could not load class " + driver, e);
+            } catch (InstantiationException e) {
+                throw new RepositoryException("Could not instantiate context " + driver, e);
+            } catch (IllegalAccessException e) {
+                throw new RepositoryException("Could not instantiate context " + driver, e);
+            } catch (NamingException e) {
+                throw new RepositoryException("Naming exception using " + driver + " url: " + url, e);
+            }
+        }
+        return DriverManager.getConnection(url, user, password);
+    }
+
+}

Property changes on: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionFactory.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionRecoveryManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionRecoveryManager.java	(.../sandbox/JCR-1456)	(revision 0)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionRecoveryManager.java	(.../trunk)	(revision 827970)
@@ -0,0 +1,482 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.persistence.bundle.util;
+
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+
+import javax.jcr.RepositoryException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides methods to get a database connection and to execute SQL statements.
+ * It also contains reconnection logic. If the connection has been closed with the
+ * {@link #close()} method, then a call to any public method except for
+ * {@link #setAutoReconnect(boolean)} will try to reestablish the connection, but
+ * only if the <code>autoReconnect</code> equals <code>true</code>.
+ * <p />
+ * The reconnection attempt
+ * can either be blocking or non-blocking, which is configured during construction.
+ * In the latter case a fixed number of reconnection attempts is made. When the
+ * reconnection failed an SQLException is thrown.
+ * <p />
+ * The methods of this class that execute SQL statements automatically call
+ * {@link #close()} when they encounter an SQLException.
+ *
+ */
+public class ConnectionRecoveryManager {
+
+    /**
+     * The default logger.
+     */
+    private static Logger log = LoggerFactory.getLogger(ConnectionRecoveryManager.class);
+
+    /**
+     * The database driver.
+     */
+    private final String driver;
+
+    /**
+     * The database URL.
+     */
+    private final String url;
+
+    /**
+     * The database user.
+     */
+    private final String user;
+
+    /**
+     * The database password.
+     */
+    private final String password;
+
+    /**
+     * The database connection that is managed by this {@link ConnectionRecoveryManager}.
+     */
+    private Connection connection;
+
+    /**
+     * An internal flag governing whether an automatic reconnect should be
+     * attempted after a SQLException had been encountered in
+     * {@link #executeStmt(String, Object[])}.
+     */
+    private boolean autoReconnect = true;
+
+    /**
+     * Indicates whether the reconnection function should block
+     * until the connection is up again.
+     */
+    private final boolean block;
+
+    /**
+     * Time to sleep in ms before a reconnect is attempted.
+     */
+    private static final int SLEEP_BEFORE_RECONNECT = 500;
+
+    /**
+     * Number of reconnection attempts per method call. Only
+     * used if <code>block == false</code>.
+     */
+    public static final int TRIALS = 20;
+
+    /**
+     * The map of prepared statements
+     */
+    private HashMap<String, PreparedStatement> preparedStatements = new HashMap<String, PreparedStatement>();
+
+    /**
+     * Indicates whether the managed connection is open or closed.
+     */
+    private boolean isClosed;
+
+    /**
+     * Creates a {@link ConnectionRecoveryManager} and establishes
+     * a database Connection using the driver, user, password and url
+     * arguments.
+     * <p />
+     * By default, the connection is in auto-commit mode, and this
+     * manager tries to reconnect if the connection is lost.
+     *
+     * @param block whether this class should block until the connection can be recovered
+     * @param driver the driver to use for the connection
+     * @param url the url to use for the connection
+     * @param user the user to use for the connection
+     * @param password the password to use for the connection
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    public ConnectionRecoveryManager(
+            boolean block, String driver, String url, String user, String password)
+            throws RepositoryException {
+        this.block = block;
+        this.driver = driver;
+        this.url = url;
+        this.user = user;
+        this.password = password;
+        try {
+            setupConnection();
+            isClosed = false;
+        } catch (SQLException e) {
+            logException("could not setup connection", e);
+            close();
+        }
+    }
+
+    /**
+     * Gets the database connection that is managed. If the
+     * connection has been closed, and autoReconnect==true
+     * then an attempt is made to reestablish the connection.
+     *
+     * @return the database connection that is managed
+     * @throws SQLException on error
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    public synchronized Connection getConnection() throws SQLException, RepositoryException {
+        if (isClosed) {
+            if (autoReconnect) {
+                reestablishConnection();
+            } else {
+                throw new SQLException("connection has been closed and autoReconnect == false");
+            }
+        }
+        return connection;
+    }
+
+    /**
+     * Sets whether the manager should try to reestablish the database
+     * connection after it has been lost or closed. When set to
+     * <code>false</code>, subsequent calls on a closed manager fail
+     * instead of triggering a reconnection attempt.
+     *
+     * @param autoReconnect whether automatic reconnection is enabled
+     */
+    public synchronized void setAutoReconnect(boolean autoReconnect) {
+        this.autoReconnect = autoReconnect;
+    }
+
+    /**
+     * Executes the given SQL query. Retries once or blocks (when the
+     * <code>block</code> parameter has been set to true on construction)
+     * if this fails and autoReconnect is enabled.
+     *
+     * @param sql the SQL query to execute
+     * @return the executed ResultSet
+     * @throws SQLException on error
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    public synchronized ResultSet executeQuery(String sql) throws SQLException, RepositoryException {
+        int trials = 2;
+        SQLException lastException  = null;
+        do {
+            trials--;
+            try {
+                return executeQueryInternal(sql);
+            } catch (SQLException e) {
+                lastException = e;
+            }
+        } while(autoReconnect && (block || trials > 0));
+        throw lastException;
+    }
+
+    /**
+     * Executes the given SQL query.
+     *
+     * @param sql query to execute
+     * @return a <code>ResultSet</code> object
+     * @throws SQLException if an error occurs
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    private ResultSet executeQueryInternal(String sql) throws SQLException, RepositoryException {
+        PreparedStatement stmt = null;
+        try {
+            stmt = preparedStatements.get(sql);
+            if (stmt == null) {
+                stmt = getConnection().prepareStatement(sql);
+                preparedStatements.put(sql, stmt);
+            }
+            return stmt.executeQuery();
+        } catch (SQLException e) {
+            logException("could not execute statement", e);
+            close();
+            throw e;
+        } finally {
+            resetStatement(stmt);
+        }
+    }
+
+    /**
+     * Executes the given SQL statement with the specified parameters.
+     *
+     * @param sql statement to execute
+     * @param params parameters to set
+     * @return the <code>Statement</code> object that had been executed
+     * @throws SQLException if an error occurs
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    public PreparedStatement executeStmt(String sql, Object[] params) throws SQLException, RepositoryException {
+        return executeStmt(sql, params, false, 0);
+    }
+
+    /**
+     * Executes the given SQL statement with the specified parameters.
+     *
+     * @param sql statement to execute
+     * @param params parameters to set
+     * @param returnGeneratedKeys if the statement should return auto generated keys
+     * @param maxRows the maximum number of rows to return (0 for all rows)
+     * @return the <code>Statement</code> object that had been executed
+     * @throws SQLException if an error occurs
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    public synchronized PreparedStatement executeStmt(
+            String sql, Object[] params, boolean returnGeneratedKeys, int maxRows)
+            throws SQLException, RepositoryException {
+        int trials = 2;
+        SQLException lastException  = null;
+        do {
+            trials--;
+            try {
+                return executeStmtInternal(sql, params, returnGeneratedKeys, maxRows);
+            } catch (SQLException e) {
+                lastException = e;
+            }
+        } while(autoReconnect && (block || trials > 0));
+        throw lastException;
+    }
+
+    /**
+     * Executes the given SQL statement with the specified parameters.
+     *
+     * @param sql statement to execute
+     * @param params parameters to set
+     * @param returnGeneratedKeys if the statement should return auto generated keys
+     * @param maxRows the maximum number of rows to return (0 for all rows)
+     * @return the <code>Statement</code> object that had been executed
+     * @throws SQLException if an error occurs
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    private PreparedStatement executeStmtInternal(
+            String sql, Object[] params, boolean returnGeneratedKeys, int maxRows)
+            throws SQLException, RepositoryException {
+        try {
+            String key = sql;
+            if (returnGeneratedKeys) {
+                key += " RETURN_GENERATED_KEYS";
+            }
+            PreparedStatement stmt = preparedStatements.get(key);
+            if (stmt == null) {
+                if (returnGeneratedKeys) {
+                    stmt = getConnection().prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
+                } else {
+                    stmt = getConnection().prepareStatement(sql);
+                }
+                preparedStatements.put(key, stmt);
+            }
+            stmt.setMaxRows(maxRows);
+            return executeStmtInternal(params, stmt);
+        } catch (SQLException e) {
+            logException("could not execute statement", e);
+            close();
+            throw e;
+        }
+    }
+
+    /**
+     * Closes all resources held by this {@link ConnectionRecoveryManager}.
+     * An ongoing transaction is discarded.
+     */
+    public synchronized void close() {
+        preparedStatements.clear();
+        try {
+            if (connection != null) {
+                if (!connection.getAutoCommit()) {
+                    connection.rollback();
+                }
+                connection.close();
+            }
+        } catch (SQLException e) {
+            logException("failed to close connection", e);
+        }
+        connection = null;
+        isClosed = true;
+    }
+
+    /**
+     * Creates the database connection.
+     *
+     * @throws SQLException on error
+     * @throws RepositoryException if the database driver could not be loaded
+     */
+    private void setupConnection() throws SQLException, RepositoryException {
+        try {
+            connection = ConnectionFactory.getConnection(driver, url, user, password);
+        } catch (SQLException e) {
+            log.warn("Could not connect; driver: " + driver + " url: " + url + " user: " + user + " error: " + e.toString(), e);
+            throw e;
+        }
+        // JCR-1013: Setter may fail unnecessarily on a managed connection
+        if (!connection.getAutoCommit()) {
+            connection.setAutoCommit(true);
+        }
+        try {
+            DatabaseMetaData meta = connection.getMetaData();
+            log.info("Database: " + meta.getDatabaseProductName() + " / " + meta.getDatabaseProductVersion());
+            log.info("Driver: " + meta.getDriverName() + " / " + meta.getDriverVersion());
+        } catch (SQLException e) {
+            log.warn("Can not retrieve database and driver name / version", e);
+        }
+    }
+
+    /**
+     * @param params the parameters for the <code>stmt</code> parameter
+     * @param stmt the statement to execute
+     * @return the executed Statement
+     * @throws SQLException on error
+     */
+    private PreparedStatement executeStmtInternal(Object[] params, PreparedStatement stmt) throws SQLException {
+        for (int i = 0; params != null && i < params.length; i++) {
+            Object p = params[i];
+            if (p instanceof StreamWrapper) {
+                StreamWrapper wrapper = (StreamWrapper) p;
+                stmt.setBinaryStream(i + 1, wrapper.stream, (int) wrapper.size);
+            } else if (p instanceof InputStream) {
+                InputStream stream = (InputStream) p;
+                stmt.setBinaryStream(i + 1, stream, -1);
+            } else {
+                stmt.setObject(i + 1, p);
+            }
+        }
+        stmt.execute();
+        resetStatement(stmt);
+        return stmt;
+    }
+
+    /**
+     * Re-establishes the database connection.
+     *
+     * @throws SQLException if reconnecting failed
+     * @throws RepositoryException
+     */
+    private void reestablishConnection() throws SQLException, RepositoryException {
+
+        long trials = TRIALS;
+        SQLException exception = null;
+
+        // Close the connection (might already have been done)
+        close();
+
+        if (block) {
+            log.warn("blocking until database connection is up again...");
+        }
+
+        // Try to reconnect
+        while (trials-- >= 0 || block) {
+
+            // Reset the last caught exception
+            exception = null;
+
+            // Sleep for a while to give database a chance
+            // to restart before a reconnect is attempted.
+            try {
+                Thread.sleep(SLEEP_BEFORE_RECONNECT);
+            } catch (InterruptedException ignore) {
+            }
+
+            // now try to re-establish connection
+            try {
+                setupConnection();
+                isClosed = false;
+                break;
+            } catch (SQLException e) {
+                exception = e;
+                close();
+            }
+        }
+
+        // Rethrow last caught exception (if this is not null, then
+        // we know that reconnecting failed and close has been called).
+        if (exception != null) {
+            throw exception;
+        } else if (block) {
+            log.warn("database connection is up again!");
+        }
+    }
+
+    /**
+     * Resets the given <code>PreparedStatement</code> by clearing the
+     * parameters and warnings contained.
+     *
+     * @param stmt The <code>PreparedStatement</code> to reset. If
+     *             <code>null</code> this method does nothing.
+     */
+    private void resetStatement(PreparedStatement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.clearParameters();
+                stmt.clearWarnings();
+            } catch (SQLException se) {
+                logException("Failed resetting PreparedStatement", se);
+            }
+        }
+    }
+
+    /**
+     * Logs an sql exception.
+     *
+     * @param message the message
+     * @param se the exception
+     */
+    private void logException(String message, SQLException se) {
+        message = message == null ? "" : message;
+        log.error(message + ", reason: " + se.getMessage() + ", state/code: "
+                + se.getSQLState() + "/" + se.getErrorCode());
+        log.debug("   dump:", se);
+    }
+
+    /**
+     * A wrapper for a binary stream that includes the
+     * size of the stream.
+     *
+     */
+    public static class StreamWrapper {
+
+        private final InputStream stream;
+        private final long size;
+
+        /**
+         * Creates a wrapper for the given InputStream that can
+         * safely be passed as a parameter to the <code>executeStmt</code>
+         * methods in the {@link ConnectionRecoveryManager} class.
+         *
+         * @param in the InputStream to wrap
+         * @param size the size of the input stream
+         */
+        public StreamWrapper(InputStream in, long size) {
+            this.stream = in;
+            this.size = size;
+        }
+    }
+
+}

Property changes on: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/ConnectionRecoveryManager.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/PostgreSQLNameIndex.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/PostgreSQLNameIndex.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/PostgreSQLNameIndex.java	(.../trunk)	(revision 827970)
@@ -19,9 +19,6 @@
 import java.sql.ResultSet;
 import java.sql.SQLException;
 
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DbUtility;
-
 /**
  * Same as {@link DbNameIndex} but does not make use of the
  * {@link java.sql.Statement#RETURN_GENERATED_KEYS} feature as it is not
@@ -31,9 +28,9 @@
 
     protected String generatedKeySelectSQL;
 
-    public PostgreSQLNameIndex(ConnectionHelper connectionHelper, String schemaObjectPrefix)
+    public PostgreSQLNameIndex(ConnectionRecoveryManager conMgr, String schemaObjectPrefix)
             throws SQLException {
-        super(connectionHelper, schemaObjectPrefix);
+        super(conMgr, schemaObjectPrefix);
     }
 
     /**
@@ -63,9 +60,9 @@
     protected int insertString(String string) {
         // assert index does not exist
         try {
-            conHelper.exec(nameInsertSQL, new Object[]{string});
+            connectionManager.executeStmt(nameInsertSQL, new Object[]{string});
             return getGeneratedKey();
-        } catch (Exception e) {        	
+        } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException("Unable to insert index for string: " + string);
             ise.initCause(e);
             throw ise;
@@ -77,20 +74,21 @@
      * @return the index.
      */
     protected int getGeneratedKey() {
-        ResultSet rs = null;
         try {
-           rs = conHelper.exec(generatedKeySelectSQL, null, false, 0);
-            if (!rs.next()) {
-                return -1;
-            } else {
-                return rs.getInt(1);
+            ResultSet rs = connectionManager.executeQuery(generatedKeySelectSQL);
+            try {
+                if (!rs.next()) {
+                    return -1;
+                } else {
+                    return rs.getInt(1);
+                }
+            } finally {
+                rs.close();
             }
         } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException("Unable to read generated index");
             ise.initCause(e);
             throw ise;
-        } finally {
-            DbUtility.close(rs);
         }
     }
 
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/DbNameIndex.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/DbNameIndex.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/DbNameIndex.java	(.../trunk)	(revision 827970)
@@ -16,13 +16,13 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle.util;
 
+import java.util.HashMap;
+
 import java.sql.ResultSet;
 import java.sql.SQLException;
-import java.util.HashMap;
+import java.sql.Statement;
 
 import org.apache.jackrabbit.core.util.StringIndex;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DbUtility;
 
 /**
  * Implements a {@link StringIndex} that stores and retrieves the names from a
@@ -37,7 +37,10 @@
  */
 public class DbNameIndex implements StringIndex {
 
-    protected final ConnectionHelper conHelper;
+    /**
+     * The class that manages statement execution and recovery from connection loss.
+     */
+    protected ConnectionRecoveryManager connectionManager;
 
     // name index statements
     protected String nameSelectSQL;
@@ -50,13 +53,13 @@
 
     /**
      * Creates a new index that is stored in a db.
-     * @param conHelper the {@link ConnectionHelper}
+     * @param con the jdbc connection
      * @param schemaObjectPrefix the prefix for table names
      * @throws SQLException if the statements cannot be prepared.
      */
-    public DbNameIndex(ConnectionHelper conHlpr, String schemaObjectPrefix)
+    public DbNameIndex(ConnectionRecoveryManager conMgr, String schemaObjectPrefix)
             throws SQLException {
-        conHelper = conHlpr;
+        connectionManager = conMgr;
         init(schemaObjectPrefix);
     }
 
@@ -130,19 +133,22 @@
     protected int insertString(String string) {
         // assert index does not exist
         int result = -1;
-        ResultSet rs = null;
         try {
-            rs = conHelper.exec(nameInsertSQL, new Object[] { string }, true, 0);
-            if (rs.next()) {
-                result = rs.getInt(1);
+            Statement stmt = connectionManager.executeStmt(
+                    nameInsertSQL, new Object[] { string }, true, 0);
+            ResultSet rs = stmt.getGeneratedKeys();
+            try {
+                if (rs.next()) {
+                    result = rs.getInt(1);
+                }
+            } finally {
+                rs.close();
             }
         } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException(
                     "Unable to insert index for string: " + string);
             ise.initCause(e);
             throw ise;
-        } finally {
-        	DbUtility.close(rs);
         }
         if (result != -1) {
             return result;
@@ -158,21 +164,24 @@
      * @return the index or -1 if not found.
      */
     protected int getIndex(String string) {
-        ResultSet rs = null;
         try {
-            rs = conHelper.exec(indexSelectSQL, new Object[] { string }, false, 0);
-            if (rs.next()) {
-                return rs.getInt(1);
-            } else {
-                return -1;
+            Statement stmt = connectionManager.executeStmt(
+                    indexSelectSQL, new Object[] { string });
+            ResultSet rs = stmt.getResultSet();
+            try {
+                if (rs.next()) {
+                    return rs.getInt(1);
+                } else {
+                    return -1;
+                }
+            } finally {
+                rs.close();
             }
         } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException(
                     "Unable to read index for string: " + string);
             ise.initCause(e);
             throw ise;
-        } finally {
-        	DbUtility.close(rs);
         }
     }
 
@@ -185,19 +194,22 @@
     protected String getString(int index)
             throws IllegalArgumentException, IllegalStateException {
         String result = null;
-        ResultSet rs = null;
         try {
-           rs = conHelper.exec(nameSelectSQL, new Object[] { Integer.valueOf(index) }, false, 0);
-            if (rs.next()) {
-                result = rs.getString(1);
+            Statement stmt = connectionManager.executeStmt(
+                    nameSelectSQL, new Object[] { Integer.valueOf(index) });
+            ResultSet rs = stmt.getResultSet();
+            try {
+                if (rs.next()) {
+                    result = rs.getString(1);
+                }
+            } finally {
+                rs.close();
             }
         } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException(
                     "Unable to read name for index: " + index);
             ise.initCause(e);
             throw ise;
-        } finally {
-        	DbUtility.close(rs);
         }
         if (result == null) {
             throw new IllegalArgumentException("Index not found: " + index);
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/NGKDbNameIndex.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/NGKDbNameIndex.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/util/NGKDbNameIndex.java	(.../trunk)	(revision 827970)
@@ -17,10 +17,7 @@
 package org.apache.jackrabbit.core.persistence.bundle.util;
 
 import java.sql.SQLException;
-import java.sql.Statement;
 
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-
 /**
  * Same as {@link DbNameIndex} but does not make use of the
  * {@link Statement#RETURN_GENERATED_KEYS} feature as it might not be provided
@@ -30,13 +27,13 @@
 
     /**
      * Creates a new index that is stored in a db.
-     * @param conHelper the {@link ConnectionHelper}
+     * @param con the ConnectionRecoveryManager
      * @param schemaObjectPrefix the prefix for table names
      * @throws SQLException if the statements cannot be prepared.
      */
-    public NGKDbNameIndex(ConnectionHelper conHelper, String schemaObjectPrefix)
+    public NGKDbNameIndex(ConnectionRecoveryManager conMgr, String schemaObjectPrefix)
             throws SQLException {
-        super(conHelper, schemaObjectPrefix);
+        super(conMgr, schemaObjectPrefix);
     }
 
     /**
@@ -61,13 +58,14 @@
     protected int insertString(String string) {
         // assert index does not exist
         try {
-            conHelper.exec(nameInsertSQL, new Object[] { string });
+            connectionManager.executeStmt(nameInsertSQL, new Object[] { string });
         } catch (Exception e) {
             IllegalStateException ise = new IllegalStateException(
                     "Unable to insert index for string: " + string);
             ise.initCause(e);
             throw ise;
         }
+
         return getIndex(string);
     }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/Oracle9PersistenceManager.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/Oracle9PersistenceManager.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/persistence/bundle/Oracle9PersistenceManager.java	(.../trunk)	(revision 827970)
@@ -16,14 +16,33 @@
  */
 package org.apache.jackrabbit.core.persistence.bundle;
 
-import javax.sql.DataSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.persistence.PMContext;
+import org.apache.jackrabbit.core.persistence.bundle.util.NodePropBundle;
+import org.apache.jackrabbit.core.persistence.util.Serializer;
+import org.apache.jackrabbit.core.state.ItemStateException;
+import org.apache.jackrabbit.core.state.NodeReferences;
 
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.reflect.Method;
+import java.sql.Blob;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
 
 /**
- * <code>OracleLegacyPersistenceManager</code> provides support for Oracle jdbc drivers prior to version 10
- * which require special handling of BLOB data. <p/> Configuration:<br>
+ * <code>OracleLegacyPersistenceManager</code> provides support for Oracle jdbc
+ * drivers prior to version 10 which require special handling of BLOB data.
+ * <p/>
+ * Configuration:<br>
  * <ul>
  * <li>&lt;param name="{@link #setBundleCacheSize(String) bundleCacheSize}" value="8"/>
  * <li>&lt;param name="{@link #setConsistencyCheck(String) consistencyCheck}" value="false"/>
@@ -40,12 +59,201 @@
 public class Oracle9PersistenceManager extends OraclePersistenceManager {
 
     /**
+     * the default logger
+     */
+    private static Logger log = LoggerFactory.getLogger(Oracle9PersistenceManager.class);
+
+    private Class< ? > blobClass;
+    private Integer duractionSessionConstant;
+    private Integer modeReadWriteConstant;
+
+    public Oracle9PersistenceManager() {
+    }
+
+    //-----------------------------------< OraclePersistenceManager overrides >
+    /**
      * {@inheritDoc}
+     * <p/>
+     * Retrieve the <code>oracle.sql.BLOB</code> class via reflection, and
+     * initialize the values for the <code>DURATION_SESSION</code> and
+     * <code>MODE_READWRITE</code> constants defined there.
+     *
+     * @see oracle.sql.BLOB#DURATION_SESSION
+     * @see oracle.sql.BLOB#MODE_READWRITE
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        Oracle10R1ConnectionHelper helper = new Oracle10R1ConnectionHelper(dataSrc, blockOnConnectionLoss);
-        helper.init();
-        return helper;
+    public void init(PMContext context) throws Exception {
+        super.init(context);
+
+        // initialize oracle.sql.BLOB class & constants
+
+        // use the Connection object for using the exact same
+        // class loader that the Oracle driver was loaded with
+        blobClass = connectionManager.getConnection().getClass().getClassLoader().loadClass("oracle.sql.BLOB");
+        duractionSessionConstant =
+                new Integer(blobClass.getField("DURATION_SESSION").getInt(null));
+        modeReadWriteConstant =
+                new Integer(blobClass.getField("MODE_READWRITE").getInt(null));
     }
+
+    /**
+     * @inheritDoc
+     */
+    protected BundleDbPersistenceManager.CloseableBLOBStore createDBBlobStore(PMContext context) throws Exception {
+        return new OracleBLOBStore();
+    }
+
+    /**
+     * @inheritDoc
+     */
+    protected synchronized void storeBundle(NodePropBundle bundle)
+            throws ItemStateException {
+        Blob blob = null;
+        try {
+            ByteArrayOutputStream out = new ByteArrayOutputStream(INITIAL_BUFFER_SIZE);
+            DataOutputStream dout = new DataOutputStream(out);
+            binding.writeBundle(dout, bundle);
+            dout.close();
+
+            String sql = bundle.isNew() ? bundleInsertSQL : bundleUpdateSQL;
+            blob = createTemporaryBlob(new ByteArrayInputStream(out.toByteArray()));
+            Object[] params = createParams(bundle.getId(), blob, true);
+            connectionManager.executeStmt(sql, params);
+        } catch (Exception e) {
+            String msg = "failed to write bundle: " + bundle.getId();
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            if (blob != null) {
+                try {
+                    freeTemporaryBlob(blob);
+                } catch (Exception e1) {
+                }
+            }
+        }
+    }
+
+    /**
+     * @inheritDoc
+     */
+    public synchronized void store(NodeReferences refs)
+            throws ItemStateException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        Blob blob = null;
+        try {
+            // check if insert or update
+            boolean update = existsReferencesTo(refs.getTargetId());
+            String sql = (update) ? nodeReferenceUpdateSQL : nodeReferenceInsertSQL;
+
+            ByteArrayOutputStream out = new ByteArrayOutputStream(INITIAL_BUFFER_SIZE);
+            // serialize references
+            Serializer.serialize(refs, out);
+
+            // we are synchronized on this instance, therefore we do not
+            // not have to additionally synchronize on the preparedStatement
+
+            blob = createTemporaryBlob(new ByteArrayInputStream(out.toByteArray()));
+            Object[] params = createParams(refs.getTargetId(), blob, true);
+            connectionManager.executeStmt(sql, params);
+
+            // there's no need to close a ByteArrayOutputStream
+            //out.close();
+        } catch (Exception e) {
+            String msg = "failed to write " + refs;
+            log.error(msg, e);
+            throw new ItemStateException(msg, e);
+        } finally {
+            if (blob != null) {
+                try {
+                    freeTemporaryBlob(blob);
+                } catch (Exception e1) {
+                }
+            }
+        }
+    }
+
+    //----------------------------------------< oracle-specific blob handling >
+    /**
+     * Creates a temporary oracle.sql.BLOB instance via reflection and spools
+     * the contents of the specified stream.
+     */
+    protected Blob createTemporaryBlob(InputStream in) throws Exception {
+        /*
+        BLOB blob = BLOB.createTemporary(con, false, BLOB.DURATION_SESSION);
+        blob.open(BLOB.MODE_READWRITE);
+        OutputStream out = blob.getBinaryOutputStream();
+        ...
+        out.flush();
+        out.close();
+        blob.close();
+        return blob;
+        */
+        Method createTemporary = blobClass.getMethod("createTemporary",
+                new Class[]{Connection.class, Boolean.TYPE, Integer.TYPE});
+        Object blob = createTemporary.invoke(null,
+                new Object[]{connectionManager.getConnection(), Boolean.FALSE, duractionSessionConstant});
+        Method open = blobClass.getMethod("open", new Class[]{Integer.TYPE});
+        open.invoke(blob, new Object[]{modeReadWriteConstant});
+        Method getBinaryOutputStream = blobClass.getMethod("getBinaryOutputStream", new Class[0]);
+        OutputStream out = (OutputStream) getBinaryOutputStream.invoke(blob);
+        try {
+            IOUtils.copy(in, out);
+        } finally {
+            try {
+                out.flush();
+            } catch (IOException ioe) {
+            }
+            out.close();
+        }
+        Method close = blobClass.getMethod("close", new Class[0]);
+        close.invoke(blob);
+        return (Blob) blob;
+    }
+
+    /**
+     * Frees a temporary oracle.sql.BLOB instance via reflection.
+     */
+    protected void freeTemporaryBlob(Object blob) throws Exception {
+        // blob.freeTemporary();
+        Method freeTemporary = blobClass.getMethod("freeTemporary", new Class[0]);
+        freeTemporary.invoke(blob);
+    }
+
+    /**
+     * A blob store specially for Oracle 9.
+     */
+    class OracleBLOBStore extends DbBlobStore {
+
+        public OracleBLOBStore() throws SQLException {
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        public synchronized void put(String blobId, InputStream in, long size)
+                throws Exception {
+
+            Blob blob = null;
+            try {
+                Statement stmt = connectionManager.executeStmt(blobSelectExistSQL, new Object[]{blobId});
+                ResultSet rs = stmt.getResultSet();
+                // a BLOB exists if the result has at least one entry
+                boolean exists = rs.next();
+                closeResultSet(rs);
+
+                String sql = (exists) ? blobUpdateSQL : blobInsertSQL;
+                blob = createTemporaryBlob(in);
+                connectionManager.executeStmt(sql, new Object[]{blob, blobId});
+            } finally {
+                if (blob != null) {
+                    try {
+                        freeTemporaryBlob(blob);
+                    } catch (Exception e) {
+                    }
+                }
+            }
+        }
+    }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/MSSqlFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/MSSqlFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/MSSqlFileSystem.java	(.../trunk)	(revision 827970)
@@ -16,7 +16,7 @@
  */
 package org.apache.jackrabbit.core.fs.db;
 
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.util.Text;
 
 /**
  * <code>MSSqlFileSystem</code> is a JDBC-based <code>FileSystem</code>
@@ -86,12 +86,8 @@
         driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
     }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
-    }
+   protected String createSchemaSql(String sql) {
+       return Text.replace(
+               super.createSchemaSql(sql), TABLE_SPACE_VARIABLE, tableSpace);
+  }
 }
\ No newline at end of file
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DatabaseFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DatabaseFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DatabaseFileSystem.java	(.../trunk)	(revision 827970)
@@ -20,30 +20,30 @@
 import org.apache.jackrabbit.core.fs.FileSystem;
 import org.apache.jackrabbit.core.fs.FileSystemException;
 import org.apache.jackrabbit.core.fs.FileSystemPathUtil;
-import org.apache.jackrabbit.core.fs.RandomAccessOutputStream;
-import org.apache.jackrabbit.core.persistence.PMContext;
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DbUtility;
-import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.apache.jackrabbit.util.Text;
 import org.apache.jackrabbit.util.TransientFileFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.sql.DataSource;
-
+import javax.jcr.RepositoryException;
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FilterInputStream;
 import java.io.FilterOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.io.FileInputStream;
-import java.io.RandomAccessFile;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.HashMap;
 
 /**
  * Base class for database file systems. This class contains common
@@ -55,29 +55,38 @@
  * See the {@link DbFileSystem} for a detailed description of the available
  * configuration options and database behaviour.
  */
-public abstract class DatabaseFileSystem implements FileSystem {
+public class DatabaseFileSystem implements FileSystem {
 
     /**
      * Logger instance
      */
     private static Logger log = LoggerFactory.getLogger(DatabaseFileSystem.class);
 
+    protected static final String SCHEMA_OBJECT_PREFIX_VARIABLE =
+            "${schemaObjectPrefix}";
+
     protected boolean initialized;
 
     protected String schema;
     protected String schemaObjectPrefix;
 
-    // initial size of buffer used to serialize objects
-    protected static final int INITIAL_BUFFER_SIZE = 8192;
-
     /**
      * Whether the schema check must be done during initialization.
      */
     private boolean schemaCheckEnabled = true;
 
-    /** the {@link ConnectionHelper} set in the {@link #init()} method */
-    protected ConnectionHelper conHelper;
+    // initial size of buffer used to serialize objects
+    protected static final int INITIAL_BUFFER_SIZE = 8192;
 
+    // jdbc connection
+    protected Connection con;
+
+    // time to sleep in ms before a reconnect is attempted
+    protected static final int SLEEP_BEFORE_RECONNECT = 10000;
+
+    // the map of prepared statements (key: sql stmt, value: prepared stmt)
+    private HashMap<String, PreparedStatement> preparedStatements = new HashMap<String, PreparedStatement>();
+
     // SQL statements
     protected String selectExistSQL;
     protected String selectFileExistSQL;
@@ -187,19 +196,23 @@
         }
 
         try {
-            conHelper = createConnectionHelper(getDataSource());
+            // setup jdbc connection
+            initConnection();
 
             // make sure schemaObjectPrefix consists of legal name characters only
-            schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
+            prepareSchemaObjectPrefix();
 
             // check if schema objects exist and create them if necessary
             if (isSchemaCheckEnabled()) {
-                createCheckSchemaOperation().run();
+                checkSchema();
             }
 
             // build sql statements
             buildSQLStatements();
 
+            // prepare statements
+            initPreparedStatements();
+
             // finally verify that there's a file system root entry
             verifyRootExists();
 
@@ -212,44 +225,29 @@
     }
 
     /**
-     * @return
-     * @throws Exception
-     */
-    protected abstract DataSource getDataSource() throws Exception;
-
-    /**
-     * This method is called from the {@link #init(PMContext)} method of this class and returns a
-     * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may
-     * override it to return a specialized connection helper.
-     * 
-     * @param dataSrc the {@link DataSource} of this persistence manager
-     * @return a {@link ConnectionHelper}
-     * @throws Exception on error
-     */
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new ConnectionHelper(dataSrc, false);
-    }
-
-    /**
-     * This method is called from {@link #init(PMContext)} after the
-     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
-     * Subclasses can overrride this implementation to get a customized implementation.
-     * 
-     * @return a new {@link CheckSchemaOperation} instance
-     */
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        InputStream in = DatabaseFileSystem.class.getResourceAsStream(getSchema() + ".ddl");
-        return new CheckSchemaOperation(conHelper, in, schemaObjectPrefix + "FSENTRY").addVariableReplacement(
-            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
-    }
-
-    /**
      * {@inheritDoc}
      */
     public void close() throws FileSystemException {
         if (!initialized) {
             throw new IllegalStateException("not initialized");
         }
+
+        try {
+            // close shared prepared statements
+            for (PreparedStatement prep : preparedStatements.values()) {
+                closeStatement(prep);
+            }
+            preparedStatements.clear();
+
+            // close jdbc connection
+            closeConnection(con);
+        } catch (SQLException e) {
+            String msg = "error closing file system";
+            log.error(msg, e);
+            throw new FileSystemException(msg, e);
+        } finally {
+            initialized = false;
+        }
     }
 
     /**
@@ -285,8 +283,9 @@
         int count = 0;
         synchronized (deleteFileSQL) {
             try {
-                count = conHelper.update(
+                Statement stmt = executeStmt(
                         deleteFileSQL, new Object[]{parentDir, name});
+                count = stmt.getUpdateCount();
             } catch (SQLException e) {
                 String msg = "failed to delete file: " + filePath;
                 log.error(msg, e);
@@ -319,11 +318,12 @@
         int count = 0;
         synchronized (deleteFolderSQL) {
             try {
-                count = conHelper.update(deleteFolderSQL, new Object[]{
+                Statement stmt = executeStmt(deleteFolderSQL, new Object[]{
                         parentDir,
                         name,
                         folderPath,
                         folderPath + FileSystem.SEPARATOR + "%"});
+                count = stmt.getUpdateCount();
             } catch (SQLException e) {
                 String msg = "failed to delete folder: " + folderPath;
                 log.error(msg, e);
@@ -352,8 +352,9 @@
         synchronized (selectExistSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectExistSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectExistSQL, new Object[]{parentDir, name});
+                rs = stmt.getResultSet();
 
                 // a file system entry exists if the result set
                 // has at least one entry
@@ -363,7 +364,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -384,8 +385,9 @@
         synchronized (selectFileExistSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectFileExistSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectFileExistSQL, new Object[]{parentDir, name});
+                rs = stmt.getResultSet();
 
                 // a file exists if the result set has at least one entry
                 return rs.next();
@@ -394,7 +396,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -415,8 +417,9 @@
         synchronized (selectFolderExistSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectFolderExistSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectFolderExistSQL, new Object[]{parentDir, name});
+                rs = stmt.getResultSet();
 
                 // a folder exists if the result set has at least one entry
                 return rs.next();
@@ -425,7 +428,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -446,8 +449,9 @@
         synchronized (selectLastModifiedSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectLastModifiedSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectLastModifiedSQL, new Object[]{parentDir, name});
+                rs = stmt.getResultSet();
                 if (!rs.next()) {
                     throw new FileSystemException("no such file system entry: " + path);
                 }
@@ -457,7 +461,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -478,8 +482,9 @@
         synchronized (selectLengthSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectLengthSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectLengthSQL, new Object[]{parentDir, name});
+                rs = stmt.getResultSet();
                 if (!rs.next()) {
                     throw new FileSystemException("no such file: " + filePath);
                 }
@@ -489,7 +494,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -511,7 +516,8 @@
         synchronized (selectChildCountSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(selectChildCountSQL, new Object[]{path}, false, 0);
+                Statement stmt = executeStmt(selectChildCountSQL, new Object[]{path});
+                rs = stmt.getResultSet();
                 if (!rs.next()) {
                     return false;
                 }
@@ -526,7 +532,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -548,8 +554,9 @@
         synchronized (selectFileAndFolderNamesSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectFileAndFolderNamesSQL, new Object[]{folderPath}, false, 0);
+                Statement stmt = executeStmt(
+                        selectFileAndFolderNamesSQL, new Object[]{folderPath});
+                rs = stmt.getResultSet();
                 ArrayList<String> names = new ArrayList<String>();
                 while (rs.next()) {
                     String name = rs.getString(1);
@@ -566,7 +573,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -588,8 +595,9 @@
         synchronized (selectFileNamesSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectFileNamesSQL, new Object[]{folderPath}, false, 0);
+                Statement stmt = executeStmt(
+                        selectFileNamesSQL, new Object[]{folderPath});
+                rs = stmt.getResultSet();
                 ArrayList<String> names = new ArrayList<String>();
                 while (rs.next()) {
                     names.add(rs.getString(1));
@@ -600,7 +608,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -622,8 +630,9 @@
         synchronized (selectFolderNamesSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
-                        selectFolderNamesSQL, new Object[]{folderPath}, false, 0);
+                Statement stmt = executeStmt(
+                        selectFolderNamesSQL, new Object[]{folderPath});
+                rs = stmt.getResultSet();
                 ArrayList<String> names = new ArrayList<String>();
                 while (rs.next()) {
                     String name = rs.getString(1);
@@ -634,13 +643,13 @@
                     }
                     names.add(name);
                 }
-                return (String[]) names.toArray(new String[names.size()]);
+                return names.toArray(new String[names.size()]);
             } catch (SQLException e) {
                 String msg = "failed to list folder entries of folder: " + folderPath;
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
     }
@@ -660,9 +669,10 @@
 
         synchronized (selectDataSQL) {
             try {
-                final ResultSet rs = conHelper.exec(
-                        selectDataSQL, new Object[]{parentDir, name}, false, 0);
+                Statement stmt = executeStmt(
+                        selectDataSQL, new Object[]{parentDir, name});
 
+                final ResultSet rs = stmt.getResultSet();
                 if (!rs.next()) {
                     throw new FileSystemException("no such file: " + filePath);
                 }
@@ -675,7 +685,7 @@
                     public void close() throws IOException {
                         super.close();
                         // close ResultSet
-                       DbUtility.close(rs);
+                        closeResultSet(rs);
                     }
                 };
             } catch (SQLException e) {
@@ -729,9 +739,9 @@
                             synchronized (updateDataSQL) {
                                 long length = tmpFile.length();
                                 in = new FileInputStream(tmpFile);
-                                conHelper.exec(updateDataSQL,
+                                executeStmt(updateDataSQL,
                                         new Object[]{
-                                            new StreamWrapper(in, length),
+                                            new SizedInputStream(in, length),
                                             new Long(System.currentTimeMillis()),
                                             new Long(length),
                                             parentDir,
@@ -742,11 +752,11 @@
                             synchronized (insertFileSQL) {
                                 long length = tmpFile.length();
                                 in = new FileInputStream(tmpFile);
-                                conHelper.exec(insertFileSQL,
+                                executeStmt(insertFileSQL,
                                         new Object[]{
                                             parentDir,
                                             name,
-                                            new StreamWrapper(in, length),
+                                            new SizedInputStream(in, length),
                                             new Long(System.currentTimeMillis()),
                                             new Long(length)
                                         });
@@ -776,6 +786,249 @@
     //----------------------------------< misc. helper methods & overridables >
 
     /**
+     * Initializes the database connection used by this file system.
+     * <p>
+     * Subclasses should normally override the {@link #getConnection()}
+     * method instead of this one. The default implementation calls
+     * {@link #getConnection()} to get the database connection and disables
+     * the autocommit feature.
+     *
+     * @throws Exception if an error occurs
+     */
+    protected void initConnection() throws Exception {
+        con = getConnection();
+        // JCR-1013: Setter may fail unnecessarily on a managed connection
+        if (!con.getAutoCommit()) {
+            con.setAutoCommit(true);
+        }
+    }
+
+    /**
+     * Abstract factory method for creating a new database connection. This
+     * method is called by {@link #initConnection()} when the file system is
+     * started. The returned connection should come with the default JDBC
+     * settings, as the {@link #initConnection()} method will explicitly set
+     * the <code>autoCommit</code> and other properties as needed.
+     * <p>
+     * Note that the returned database connection is kept during the entire
+     * lifetime of the file system, after which it is closed by
+     * {@link #close()} using the {@link #closeConnection(Connection)} method.
+     *
+     * @return new connection
+     * @throws Exception if an error occurs
+     */
+    protected Connection getConnection() throws Exception {
+        throw new UnsupportedOperationException("Override in a subclass!");
+    }
+
+    /**
+     * Closes the given database connection. This method is called by
+     * {@link #close()} to close the connection acquired using
+     * {@link #getConnection()} when the file system was started.
+     * <p>
+     * The default implementation just calls the {@link Connection#close()}
+     * method of the given connection, but subclasses can override this
+     * method to provide more extensive database and connection cleanup.
+     *
+     * @param connection database connection
+     * @throws SQLException if an error occurs
+     */
+    protected void closeConnection(Connection connection) throws SQLException {
+        connection.close();
+    }
+
+    /**
+     * Re-establishes the database connection. This method is called by
+     * {@link #executeStmt(String, Object[])} after a <code>SQLException</code>
+     * has been encountered.
+     *
+     * @return true if the connection could be successfully re-established,
+     *         false otherwise.
+     */
+    protected synchronized boolean reestablishConnection() {
+        // in any case try to shut down current connection
+        // gracefully in order to avoid potential memory leaks
+
+        // close shared prepared statements
+        for (PreparedStatement prep : preparedStatements.values()) {
+            closeStatement(prep);
+        }
+        try {
+            closeConnection(con);
+        } catch (Exception ignore) {
+        }
+
+        // sleep for a while to give database a chance
+        // to restart before a reconnect is attempted
+
+        try {
+            Thread.sleep(SLEEP_BEFORE_RECONNECT);
+        } catch (InterruptedException ignore) {
+        }
+
+        // now try to re-establish connection
+
+        try {
+            initConnection();
+            initPreparedStatements();
+            return true;
+        } catch (Exception e) {
+            log.error("failed to re-establish connection", e);
+            // reconnect failed
+            return false;
+        }
+    }
+
+    /**
+     * Executes the given SQL statement with the specified parameters.
+     * If a <code>SQLException</code> is encountered <i>one</i> attempt is made
+     * to re-establish the database connection and re-execute the statement.
+     *
+     * @param sql    statement to execute
+     * @param params parameters to set
+     * @return the <code>Statement</code> object that had been executed
+     * @throws SQLException if an error occurs
+     */
+    protected Statement executeStmt(String sql, Object[] params)
+            throws SQLException {
+        int trials = 2;
+        while (true) {
+            PreparedStatement stmt = preparedStatements.get(sql);
+            try {
+                for (int i = 0; i < params.length; i++) {
+                    if (params[i] instanceof SizedInputStream) {
+                        SizedInputStream in = (SizedInputStream) params[i];
+                        stmt.setBinaryStream(i + 1, in, (int) in.getSize());
+                    } else {
+                        stmt.setObject(i + 1, params[i]);
+                    }
+                }
+                stmt.execute();
+                resetStatement(stmt);
+                return stmt;
+            } catch (SQLException se) {
+                if (--trials == 0) {
+                    // no more trials, re-throw
+                    throw se;
+                }
+                log.warn("execute failed, about to reconnect...", se);
+
+                // try to reconnect
+                if (reestablishConnection()) {
+                    // reconnect succeeded; check whether it's possible to
+                    // re-execute the prepared stmt with the given parameters
+                    for (int i = 0; i < params.length; i++) {
+                        if (params[i] instanceof SizedInputStream) {
+                            SizedInputStream in = (SizedInputStream) params[i];
+                            if (in.isConsumed()) {
+                                // we're unable to re-execute the prepared stmt
+                                // since an InputStream paramater has already
+                                // been 'consumed';
+                                // re-throw previous SQLException
+                                throw se;
+                            }
+                        }
+                    }
+
+                    // try again to execute the statement
+                    continue;
+                } else {
+                    // reconnect failed, re-throw previous SQLException
+                    throw se;
+                }
+            }
+        }
+    }
+
+    /**
+     * Makes sure that <code>schemaObjectPrefix</code> does only consist of
+     * characters that are allowed in names on the target database. Illegal
+     * characters will be escaped as necessary.
+     *
+     * @throws Exception if an error occurs
+     */
+    protected void prepareSchemaObjectPrefix() throws Exception {
+        DatabaseMetaData metaData = con.getMetaData();
+        String legalChars = metaData.getExtraNameCharacters();
+        legalChars += "ABCDEFGHIJKLMNOPQRSTUVWXZY0123456789_";
+
+        String prefix = schemaObjectPrefix.toUpperCase();
+        StringBuffer escaped = new StringBuffer();
+        for (int i = 0; i < prefix.length(); i++) {
+            char c = prefix.charAt(i);
+            if (legalChars.indexOf(c) == -1) {
+                escaped.append("_x");
+                String hex = Integer.toHexString(c);
+                escaped.append("0000".toCharArray(), 0, 4 - hex.length());
+                escaped.append(hex);
+                escaped.append("_");
+            } else {
+                escaped.append(c);
+            }
+        }
+        schemaObjectPrefix = escaped.toString();
+    }
+
+    /**
+     * Checks if the required schema objects exist and creates them if they
+     * don't exist yet.
+     *
+     * @throws Exception if an error occurs
+     */
+    protected void checkSchema() throws Exception {
+        DatabaseMetaData metaData = con.getMetaData();
+        String tableName = schemaObjectPrefix + "FSENTRY";
+        if (metaData.storesLowerCaseIdentifiers()) {
+            tableName = tableName.toLowerCase();
+        } else if (metaData.storesUpperCaseIdentifiers()) {
+            tableName = tableName.toUpperCase();
+        }
+        ResultSet rs = metaData.getTables(null, null, tableName, null);
+        boolean schemaExists;
+        try {
+            schemaExists = rs.next();
+        } finally {
+            rs.close();
+        }
+
+        if (!schemaExists) {
+            // read ddl from resources
+            InputStream in = DatabaseFileSystem.class.getResourceAsStream(schema + ".ddl");
+            if (in == null) {
+                String msg = "Configuration error: unknown schema '" + schema + "'";
+                log.debug(msg);
+                throw new RepositoryException(msg);
+            }
+            BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+            Statement stmt = con.createStatement();
+            try {
+                String sql = reader.readLine();
+                while (sql != null) {
+                    // Skip comments and empty lines
+                    if (!sql.startsWith("#") && sql.length() > 0) {
+                        // replace prefix variable
+                        sql = createSchemaSql(sql);
+                        stmt.executeUpdate(sql);
+                    }
+                    // read next sql stmt
+                    sql = reader.readLine();
+                }
+            } finally {
+                IOUtils.closeQuietly(in);
+                closeStatement(stmt);
+            }
+        }
+    }
+
+    /**
+     * Replace wildcards.
+     */
+    protected String createSchemaSql(String sql) {
+        sql = Text.replace(sql, SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
+        return sql;
+    }
+
+    /**
      * Builds the SQL statements
      */
     protected void buildSQLStatements() {
@@ -869,6 +1122,51 @@
     }
 
     /**
+     * Initializes the map of prepared statements.
+     *
+     * @throws SQLException if an error occurs
+     */
+    protected void initPreparedStatements() throws SQLException {
+        preparedStatements.put(
+                selectExistSQL, con.prepareStatement(selectExistSQL));
+        preparedStatements.put(
+                selectFileExistSQL, con.prepareStatement(selectFileExistSQL));
+        preparedStatements.put(
+                selectFolderExistSQL, con.prepareStatement(selectFolderExistSQL));
+        preparedStatements.put(
+                selectChildCountSQL, con.prepareStatement(selectChildCountSQL));
+        preparedStatements.put(
+                selectDataSQL, con.prepareStatement(selectDataSQL));
+        preparedStatements.put(
+                selectLastModifiedSQL, con.prepareStatement(selectLastModifiedSQL));
+        preparedStatements.put(
+                selectLengthSQL, con.prepareStatement(selectLengthSQL));
+        preparedStatements.put(
+                selectFileNamesSQL, con.prepareStatement(selectFileNamesSQL));
+        preparedStatements.put(
+                selectFolderNamesSQL, con.prepareStatement(selectFolderNamesSQL));
+        preparedStatements.put(
+                selectFileAndFolderNamesSQL, con.prepareStatement(selectFileAndFolderNamesSQL));
+        preparedStatements.put(
+                deleteFileSQL, con.prepareStatement(deleteFileSQL));
+        preparedStatements.put(
+                deleteFolderSQL, con.prepareStatement(deleteFolderSQL));
+        preparedStatements.put(
+                insertFileSQL, con.prepareStatement(insertFileSQL));
+        preparedStatements.put(
+                insertFolderSQL, con.prepareStatement(insertFolderSQL));
+        preparedStatements.put(
+                updateDataSQL, con.prepareStatement(updateDataSQL));
+        preparedStatements.put(
+                updateLastModifiedSQL, con.prepareStatement(updateLastModifiedSQL));
+        preparedStatements.put(
+                copyFileSQL, con.prepareStatement(copyFileSQL));
+        preparedStatements.put(
+                copyFilesSQL, con.prepareStatement(copyFilesSQL));
+
+    }
+
+    /**
      * Verifies that the root file system entry exists. If it doesn't exist yet
      * it will be automatically created.
      *
@@ -879,9 +1177,10 @@
         synchronized (selectFolderExistSQL) {
             ResultSet rs = null;
             try {
-                rs = conHelper.exec(
+                Statement stmt = executeStmt(
                         selectFolderExistSQL,
-                        new Object[]{FileSystem.SEPARATOR, ""}, false, 0);
+                        new Object[]{FileSystem.SEPARATOR, ""});
+                rs = stmt.getResultSet();
 
                 if (rs.next()) {
                     // root entry exists
@@ -892,7 +1191,7 @@
                 log.error(msg, e);
                 throw new FileSystemException(msg, e);
             } finally {
-                DbUtility.close(rs);
+                closeResultSet(rs);
             }
         }
 
@@ -920,7 +1219,7 @@
 
         synchronized (insertFolderSQL) {
             try {
-                conHelper.exec(
+                executeStmt(
                         insertFolderSQL,
                         new Object[]{
                                 parentDir,
@@ -934,4 +1233,88 @@
         }
     }
 
+    /**
+     * Resets the given <code>PreparedStatement</code> by clearing the parameters
+     * and warnings contained.
+     * <p/>
+     * NOTE: This method MUST be called in a synchronized context as neither
+     * this method nor the <code>PreparedStatement</code> instance on which it
+     * operates are thread safe.
+     *
+     * @param stmt The <code>PreparedStatement</code> to reset. If
+     *             <code>null</code> this method does nothing.
+     */
+    protected void resetStatement(PreparedStatement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.clearParameters();
+                stmt.clearWarnings();
+            } catch (SQLException se) {
+                log.error("failed resetting PreparedStatement", se);
+            }
+        }
+    }
+
+    protected void closeResultSet(ResultSet rs) {
+        if (rs != null) {
+            try {
+                rs.close();
+            } catch (SQLException se) {
+                log.error("failed closing ResultSet", se);
+            }
+        }
+    }
+
+    protected void closeStatement(Statement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.close();
+            } catch (SQLException se) {
+                log.error("failed closing Statement", se);
+            }
+        }
+    }
+
+    //--------------------------------------------------------< inner classes >
+
+    /**
+     * An input stream that knows its size.
+     */
+    class SizedInputStream extends FilterInputStream {
+        private final long size;
+        private boolean consumed;
+
+        SizedInputStream(InputStream in, long size) {
+            super(in);
+            this.size = size;
+        }
+
+        long getSize() {
+            return size;
+        }
+
+        boolean isConsumed() {
+            return consumed;
+        }
+
+        public int read() throws IOException {
+            consumed = true;
+            return super.read();
+        }
+
+        public long skip(long n) throws IOException {
+            consumed = true;
+            return super.skip(n);
+        }
+
+        public int read(byte[] b) throws IOException {
+            consumed = true;
+            return super.read(b);
+        }
+
+        public int read(byte[] b, int off, int len) throws IOException {
+            consumed = true;
+            return super.read(b, off, len);
+        }
+    }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DbFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DbFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DbFileSystem.java	(.../trunk)	(revision 827970)
@@ -16,11 +16,13 @@
  */
 package org.apache.jackrabbit.core.fs.db;
 
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionFactory;
 
-import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
 
+import javax.jcr.RepositoryException;
+
 /**
  * <code>DbFileSystem</code> is a generic JDBC-based <code>FileSystem</code>
  * implementation for Jackrabbit that persists file system entries in a
@@ -106,7 +108,7 @@
  * </pre>
  * See also {@link DerbyFileSystem}, {@link DB2FileSystem}, {@link OracleFileSystem}.
  */
-public class DbFileSystem extends DatabaseFileSystem implements DatabaseAware {
+public class DbFileSystem extends DatabaseFileSystem {
 
     /**
      * the full qualified JDBC driver name
@@ -128,20 +130,6 @@
      */
     protected String password;
 
-    protected String dataSourceName;
-
-    /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory;
-
-    /**
-     * {@inheritDoc}
-     */
-    public void setConnectionFactory(ConnectionFactory connnectionFactory) {
-        this.connectionFactory = connnectionFactory;
-    }
-
     //----------------------------------------------------< setters & getters >
     public String getUrl() {
         return url;
@@ -175,14 +163,7 @@
         this.driver = driver;
     }
 
-    public String getDataSourceName() {
-        return dataSourceName;
-    }
 
-    public void setDataSourceName(String dataSourceName) {
-        this.dataSourceName = dataSourceName;
-    }
-
     //-------------------------------------------< java.lang.Object overrides >
     /**
      * {@inheritDoc}
@@ -218,18 +199,12 @@
     //--------------------------------------------------< DatabaseFileSystem >
 
     /**
-     * {@inheritDoc}
+     * Initialize the JDBC connection.
+     *
+     * @throws SQLException if an error occurs
      */
-    @Override
-    protected final DataSource getDataSource() throws Exception {
-        if (getDataSourceName() == null || "".equals(getDataSourceName())) {
-            return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword());
-        } else {
-            String dbType = connectionFactory.getDataBaseType(dataSourceName);
-            if (DatabaseFileSystem.class.getResourceAsStream(dbType + ".ddl") != null) {
-                setSchema(dbType);
-            }
-            return connectionFactory.getDataSource(dataSourceName);
-        }
+    protected Connection getConnection() throws RepositoryException, SQLException {
+        return ConnectionFactory.getConnection(driver, url, user, password);
     }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/DerbyFileSystem.java	(.../trunk)	(revision 827970)
@@ -16,16 +16,13 @@
  */
 package org.apache.jackrabbit.core.fs.db;
 
-import org.apache.jackrabbit.core.fs.FileSystemException;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.sql.DriverManager;
 import java.sql.SQLException;
+import java.sql.Connection;
 
-import javax.sql.DataSource;
-
 /**
  * <code>DerbyFileSystem</code> is a JDBC-based <code>FileSystem</code>
  * implementation for Jackrabbit that persists file system entries in an
@@ -59,6 +56,11 @@
 public class DerbyFileSystem extends DbFileSystem {
 
     /**
+     * Logger instance
+     */
+    private static Logger log = LoggerFactory.getLogger(DerbyFileSystem.class);
+
+    /**
      * Flag indicating whether this derby database should be shutdown on close.
      */
     protected boolean shutdownOnClose;
@@ -87,24 +89,46 @@
     //-----------------------------------------------< DbFileSystem overrides >
 
     /**
-     * {@inheritDoc}
+     * Closes the given connection and shuts down the embedded Derby
+     * database if <code>shutdownOnClose</code> is set to true.
+     *
+     * @param connection database connection
+     * @throws SQLException if an error occurs
+     * @see DatabaseFileSystem#closeConnection(Connection)
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new DerbyConnectionHelper(dataSrc, false);
-    }
+    protected void closeConnection(Connection connection) throws SQLException {
+        // prepare connection url for issuing shutdown command
+        String url;
+        try {
+            url = connection.getMetaData().getURL();
+        } catch (SQLException e) {
+            // JCR-1557: embedded derby db probably already shut down;
+            // this happens when configuring multiple FS/PM instances
+            // to use the same embedded derby db instance.
+            log.debug("failed to retrieve connection url: embedded db probably already shut down", e);
+            return;
+        }
+        int pos = url.lastIndexOf(';');
+        if (pos != -1) {
+            // strip any attributes from connection url
+            url = url.substring(0, pos);
+        }
+        url += ";shutdown=true";
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void close() throws FileSystemException {
-        super.close();
+        // we have to reset the connection to 'autoCommit=true' before closing it;
+        // otherwise Derby would mysteriously complain about some pending uncommitted
+        // changes which can't possibly be true.
+        // @todo further investigate
+        connection.setAutoCommit(true);
+        connection.close();
+
         if (shutdownOnClose) {
+            // now it's safe to shutdown the embedded Derby database
             try {
-                ((DerbyConnectionHelper) conHelper).shutDown(driver);
+                DriverManager.getConnection(url);
             } catch (SQLException e) {
-                throw new FileSystemException("failed to shutdown Derby", e);
+                // a shutdown command always raises a SQLException
+                log.info(e.getMessage());
             }
         }
     }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/OracleFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/OracleFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/OracleFileSystem.java	(.../trunk)	(revision 827970)
@@ -16,11 +16,30 @@
  */
 package org.apache.jackrabbit.core.fs.db;
 
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.util.Text;
+import org.apache.jackrabbit.util.TransientFileFactory;
+import org.apache.jackrabbit.core.fs.FileSystemException;
+import org.apache.jackrabbit.core.fs.FileSystemPathUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import javax.sql.DataSource;
+import javax.jcr.RepositoryException;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.Blob;
+import java.sql.Connection;
+import java.io.InputStream;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.io.File;
+import java.io.FilterOutputStream;
+import java.io.FileOutputStream;
+import java.io.FileInputStream;
+import java.lang.reflect.Method;
 
 /**
  * <code>OracleFileSystem</code> is a JDBC-based <code>FileSystem</code>
@@ -55,8 +74,21 @@
  */
 public class OracleFileSystem extends DbFileSystem {
 
+    /**
+     * Logger instance
+     */
+    private static Logger log = LoggerFactory.getLogger(OracleFileSystem.class);
+
+    private Class< ? > blobClass;
+    private Integer durationSessionConstant;
+    private Integer modeReadWriteConstant;
+
+    /** the variable for the Oracle table space */
+    public static final String TABLE_SPACE_VARIABLE =
+        "${tableSpace}";
+
     /** the Oracle table space to use */
-    protected String tableSpace = "";
+    protected String tableSpace;
 
     /**
      * Creates a new <code>OracleFileSystem</code> instance.
@@ -82,36 +114,110 @@
      * @param tableSpace the Oracle table space.
      */
     public void setTableSpace(String tableSpace) {
-        if (tableSpace != null && tableSpace.trim().length() > 0) {
-            this.tableSpace = "tablespace " + tableSpace.trim();
+        if (tableSpace != null) {
+            this.tableSpace = tableSpace.trim();
         } else {
-            this.tableSpace = "";
+            this.tableSpace = null;
         }
     }
 
     //-----------------------------------------< DatabaseFileSystem overrides >
-
     /**
      * {@inheritDoc}
+     * <p/>
+     * Retrieve the <code>oracle.sql.BLOB</code> class via reflection, and
+     * initialize the values for the <code>DURATION_SESSION</code> and
+     * <code>MODE_READWRITE</code> constants defined there.
+     * @see oracle.sql.BLOB#DURATION_SESSION
+     * @see oracle.sql.BLOB#MODE_READWRITE
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        Oracle10R1ConnectionHelper helper = new Oracle10R1ConnectionHelper(dataSrc, false);
-        helper.init();
-        return helper;
+    public void init() throws FileSystemException {
+        super.init();
+
+        // initialize oracle.sql.BLOB class & constants
+
+        // use the Connection object for using the exact same
+        // class loader that the Oracle driver was loaded with
+        try {
+            blobClass = con.getClass().getClassLoader().loadClass("oracle.sql.BLOB");
+            durationSessionConstant =
+                    new Integer(blobClass.getField("DURATION_SESSION").getInt(null));
+            modeReadWriteConstant =
+                    new Integer(blobClass.getField("MODE_READWRITE").getInt(null));
+        } catch (Exception e) {
+            String msg = "failed to load/introspect oracle.sql.BLOB";
+            log.error(msg, e);
+            throw new FileSystemException(msg, e);
+        }
     }
 
     /**
      * {@inheritDoc}
+     * <p/>
+     * Overridden in order to support multiple oracle schemas. Note that
+     * schema names in Oracle correspond to the username of the connection.
+     * See http://issues.apache.org/jira/browse/JCR-582
+     *
+     * @throws Exception if an error occurs
      */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
+    protected void checkSchema() throws Exception {
+        DatabaseMetaData metaData = con.getMetaData();
+        String tableName = schemaObjectPrefix + "FSENTRY";
+        if (metaData.storesLowerCaseIdentifiers()) {
+            tableName = tableName.toLowerCase();
+        } else if (metaData.storesUpperCaseIdentifiers()) {
+            tableName = tableName.toUpperCase();
+        }
+        String userName = metaData.getUserName();
+
+        ResultSet rs = metaData.getTables(null, userName, tableName, null);
+        boolean schemaExists;
+        try {
+            schemaExists = rs.next();
+        } finally {
+            rs.close();
+        }
+
+        if (!schemaExists) {
+            // read ddl from resources
+            InputStream in = OracleFileSystem.class.getResourceAsStream(schema + ".ddl");
+            if (in == null) {
+                String msg = "Configuration error: unknown schema '" + schema + "'";
+                log.debug(msg);
+                throw new RepositoryException(msg);
+            }
+            BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+            Statement stmt = con.createStatement();
+            try {
+                String sql = reader.readLine();
+                while (sql != null) {
+                    // Skip comments and empty lines
+                    if (!sql.startsWith("#") && sql.length() > 0) {
+                        // replace prefix variable
+                        sql = Text.replace(sql, SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
+
+                        // set the tablespace if it is defined
+                        String tspace;
+                        if (tableSpace == null || "".equals(tableSpace)) {
+                            tspace = "";
+                        } else {
+                            tspace = "tablespace " + tableSpace;
+                        }
+                        sql = Text.replace(sql, TABLE_SPACE_VARIABLE, tspace).trim();
+
+                        // execute sql stmt
+                        stmt.executeUpdate(sql);
+                    }
+                    // read next sql stmt
+                    sql = reader.readLine();
+                }
+            } finally {
+                IOUtils.closeQuietly(in);
+                closeStatement(stmt);
+            }
+        }
     }
 
-    //-----------------------------------------< DatabaseFileSystem overrides >
-    
     /**
      * Builds the SQL statements
      * <p/>
@@ -220,4 +326,149 @@
                 + schemaObjectPrefix + "FSENTRY where FSENTRY_PATH = ? "
                 + "and FSENTRY_LENGTH is not null";
     }
+
+
+    /**
+     * {@inheritDoc}
+     * <p/>
+     * Overridden because we need to use <code>oracle.sql.BLOB</code>
+     * and <code>PreparedStatement#setBlob</code> instead of just
+     * <code>PreparedStatement#setBinaryStream</code>.
+     */
+    public OutputStream getOutputStream(final String filePath) throws FileSystemException {
+        if (!initialized) {
+            throw new IllegalStateException("not initialized");
+        }
+
+        FileSystemPathUtil.checkFormat(filePath);
+
+        final String parentDir = FileSystemPathUtil.getParentDir(filePath);
+        final String name = FileSystemPathUtil.getName(filePath);
+
+        if (!isFolder(parentDir)) {
+            throw new FileSystemException("path not found: " + parentDir);
+        }
+
+        if (isFolder(filePath)) {
+            throw new FileSystemException("path denotes folder: " + filePath);
+        }
+
+        try {
+            TransientFileFactory fileFactory = TransientFileFactory.getInstance();
+            final File tmpFile = fileFactory.createTransientFile("bin", null, null);
+
+            return new FilterOutputStream(new FileOutputStream(tmpFile)) {
+
+                public void write(byte[] bytes, int off, int len) throws IOException {
+                    out.write(bytes, off, len);
+                }
+
+                public void close() throws IOException {
+                    out.flush();
+                    ((FileOutputStream) out).getFD().sync();
+                    out.close();
+
+                    InputStream in = null;
+                    Blob blob = null;
+                    try {
+                        if (isFile(filePath)) {
+                            synchronized (updateDataSQL) {
+                                long length = tmpFile.length();
+                                in = new FileInputStream(tmpFile);
+                                blob = createTemporaryBlob(in);
+                                executeStmt(updateDataSQL,
+                                        new Object[]{
+                                            blob,
+                                            new Long(System.currentTimeMillis()),
+                                            new Long(length),
+                                            parentDir,
+                                            name
+                                        });
+                            }
+                        } else {
+                            synchronized (insertFileSQL) {
+                                long length = tmpFile.length();
+                                in = new FileInputStream(tmpFile);
+                                blob = createTemporaryBlob(in);
+                                executeStmt(insertFileSQL,
+                                        new Object[]{
+                                            parentDir,
+                                            name,
+                                            blob,
+                                            new Long(System.currentTimeMillis()),
+                                            new Long(length)
+                                        });
+                            }
+                        }
+                    } catch (Exception e) {
+                        IOException ioe = new IOException(e.getMessage());
+                        ioe.initCause(e);
+                        throw ioe;
+                    } finally {
+                        if (blob != null) {
+                            try {
+                                freeTemporaryBlob(blob);
+                            } catch (Exception e1) {
+                            }
+                        }
+                        IOUtils.closeQuietly(in);
+                        // temp file can now safely be removed
+                        tmpFile.delete();
+                    }
+                }
+            };
+        } catch (Exception e) {
+            String msg = "failed to open output stream to file: " + filePath;
+            log.error(msg, e);
+            throw new FileSystemException(msg, e);
+        }
+    }
+
+    //----------------------------------------< oracle-specific blob handling >
+    /**
+     * Creates a temporary oracle.sql.BLOB instance via reflection and spools
+     * the contents of the specified stream.
+     */
+    protected Blob createTemporaryBlob(InputStream in) throws Exception {
+        /*
+        BLOB blob = BLOB.createTemporary(con, false, BLOB.DURATION_SESSION);
+        blob.open(BLOB.MODE_READWRITE);
+        OutputStream out = blob.getBinaryOutputStream();
+        ...
+        out.flush();
+        out.close();
+        blob.close();
+        return blob;
+        */
+        Method createTemporary = blobClass.getMethod("createTemporary",
+                new Class[]{Connection.class, Boolean.TYPE, Integer.TYPE});
+        Object blob = createTemporary.invoke(null,
+                new Object[]{con, Boolean.FALSE, durationSessionConstant});
+        Method open = blobClass.getMethod("open", new Class[]{Integer.TYPE});
+        open.invoke(blob, new Object[]{modeReadWriteConstant});
+        Method getBinaryOutputStream =
+                blobClass.getMethod("getBinaryOutputStream", new Class[0]);
+        OutputStream out = (OutputStream) getBinaryOutputStream.invoke(blob);
+        try {
+            IOUtils.copy(in, out);
+        } finally {
+            try {
+                out.flush();
+            } catch (IOException ioe) {
+            }
+            out.close();
+        }
+        Method close = blobClass.getMethod("close", new Class[0]);
+        close.invoke(blob);
+        return (Blob) blob;
+    }
+
+    /**
+     * Frees a temporary oracle.sql.BLOB instance via reflection.
+     */
+    protected void freeTemporaryBlob(Object blob) throws Exception {
+        // blob.freeTemporary();
+        Method freeTemporary = blobClass.getMethod("freeTemporary", new Class[0]);
+        freeTemporary.invoke(blob);
+    }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/JNDIDatabaseFileSystem.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/JNDIDatabaseFileSystem.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/db/JNDIDatabaseFileSystem.java	(.../trunk)	(revision 827970)
@@ -17,7 +17,10 @@
 package org.apache.jackrabbit.core.fs.db;
 
 import javax.naming.InitialContext;
+import javax.naming.NamingException;
 import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
 
 /**
  * @deprecated
@@ -65,11 +68,17 @@
     //--------------------------------------------------< DatabaseFileSystem >
 
     /**
-     * {@inheritDoc}
+     * Returns a JDBC connection from a {@link DataSource} acquired from JNDI
+     * with the configured data source location.
+     *
+     * @return new database connection
+     * @throws NamingException if the given data source location does not exist
+     * @throws SQLException if a database access error occurs
      */
-    @Override
-    protected DataSource getDataSource() throws Exception {
+    protected Connection getConnection() throws NamingException, SQLException {
         InitialContext ic = new InitialContext();
-        return (DataSource) ic.lookup(dataSourceLocation);
+        DataSource dataSource = (DataSource) ic.lookup(dataSourceLocation);
+        return dataSource.getConnection();
     }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java	(.../trunk)	(revision 827970)
@@ -23,7 +23,7 @@
 import org.apache.commons.io.input.AutoCloseInputStream;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataStoreException;
-import org.apache.jackrabbit.core.util.db.DbUtility;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,7 +39,8 @@
     protected DbDataStore store;
     protected DataIdentifier identifier;
     protected boolean endOfStream;
-
+    
+    protected ConnectionRecoveryManager conn;
     protected ResultSet rs;
     
 
@@ -129,9 +130,16 @@
             // some additional database objects 
             // may need to be closed
             if (rs != null) {
-                DbUtility.close(rs);
+                DatabaseHelper.closeSilently(rs);
                 rs = null;
             }
+            if (conn != null) {
+                try {
+                    store.putBack(conn);
+                } catch (DataStoreException e) {
+                    log.info("Error closing DbResource", e);
+                }
+            }
         }
     }
 
@@ -200,6 +208,16 @@
     }
 
     /**
+     * Set the database connection of this input stream. This object must be
+     * closed once the stream is closed.
+     * 
+     * @param conn the connection
+     */
+    void setConnection(ConnectionRecoveryManager conn) {
+        this.conn = conn;
+    }
+
+    /**
      * Set the result set of this input stream. This object must be closed once
      * the stream is closed.
      * 
@@ -208,4 +226,5 @@
     void setResultSet(ResultSet rs) {
         this.rs = rs;
     }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DatabaseHelper.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DatabaseHelper.java	(.../sandbox/JCR-1456)	(revision 0)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DatabaseHelper.java	(.../trunk)	(revision 827970)
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.data.db;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Helper methods for database operations.
+ */
+public class DatabaseHelper {
+
+    private static Logger log = LoggerFactory.getLogger(DatabaseHelper.class);
+
+    /**
+     * Silently closes a connection.
+     */
+    public static void closeSilently(Connection con) {
+        try {
+            if (con != null) {
+                con.close();
+            }
+        } catch (SQLException e) {
+            log.info("Couldn't close connection: ", e);
+        }
+    }
+
+    /**
+     * Silently closes a result set.
+     */
+    public static void closeSilently(ResultSet rs) {
+        try {
+            if (rs != null) {
+                rs.close();
+            }
+        } catch (SQLException e) {
+            log.info("Couldn't close result set: ", e);
+        }
+    }
+
+    /**
+     * Silently closes a statement.
+     */
+    public static void closeSilently(Statement stmt) {
+        try {
+            if (stmt != null) {
+                stmt.close();
+            }
+        } catch (SQLException e) {
+            log.info("Couldn't close statement: ", e);
+        }
+    }
+}

Property changes on: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DatabaseHelper.java
___________________________________________________________________
Added: svn:eol-style
   + native
Added: svn:keywords
   + Author Date Id Revision Rev URL

Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/Pool.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/Pool.java	(.../sandbox/JCR-1456)	(revision 0)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/Pool.java	(.../trunk)	(revision 827970)
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.data.db;
+
+import java.util.ArrayList;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import javax.jcr.RepositoryException;
+
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager;
+
+/**
+ * Implementation of a simple ConnectionRecoveryManager pool.
+ * The maximum number of pooled objects can be set, and if more objects
+ * are requested the pool waits until one object is put back.
+ */
+public class Pool {
+    protected final int maxSize;
+    protected final ArrayList<ConnectionRecoveryManager> all = new ArrayList<ConnectionRecoveryManager>();
+    protected final DbDataStore factory;
+    protected final LinkedBlockingQueue<ConnectionRecoveryManager> pool = new LinkedBlockingQueue<ConnectionRecoveryManager>();
+
+    /**
+     * Create a new pool using the given factory and maximum pool size.
+     *
+     * @param factory the db data store
+     * @param maxSize the maximum number of objects in the pool.
+     */
+    protected Pool(DbDataStore factory, int maxSize) {
+        this.factory = factory;
+        this.maxSize = Math.max(1, maxSize);
+    }
+
+    /**
+     * Get a connection from the pool. This method may open a new connection if
+     * required, or, if the maximum number of connections is open, it will
+     * wait for one to be returned.
+     *
+     * @return the connection
+     */
+    protected ConnectionRecoveryManager get() throws InterruptedException, RepositoryException {
+        ConnectionRecoveryManager o = pool.poll();
+        if (o == null) {
+            synchronized (all) {
+                if (all.size() < maxSize) {
+                    o = factory.createNewConnection();
+                    all.add(o);
+                }
+            }
+            if (o == null) {
+                o = pool.take();
+            }
+        }
+        return o;
+    }
+
+    /**
+     * Put a connection back into the pool.
+     *
+     * @param o the connection
+     */
+    protected void add(ConnectionRecoveryManager o) throws InterruptedException {
+        pool.put(o);
+    }
+
+    /**
+     * Get all connections (even if they are currently being used).
+     *
+     * @return all connections
+     */
+    protected ArrayList<ConnectionRecoveryManager> getAll() {
+        return all;
+    }
+}

Property changes on: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/Pool.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java	(.../trunk)	(revision 827970)
@@ -20,13 +20,9 @@
 import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.core.data.DataStore;
 import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager;
 import org.apache.jackrabbit.core.persistence.bundle.util.TrackingInputStream;
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
-import org.apache.jackrabbit.core.util.db.DbUtility;
-import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager.StreamWrapper;
 import org.apache.jackrabbit.util.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,6 +36,8 @@
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -51,7 +49,6 @@
 import java.util.WeakHashMap;
 
 import javax.jcr.RepositoryException;
-import javax.sql.DataSource;
 
 /**
  * A data store implementation that stores the records in a database using JDBC.
@@ -96,7 +93,7 @@
  * The tablePrefix can be used to specify a schema and / or catalog name:
  * &lt;param name="tablePrefix" value="ds.">
  */
-public class DbDataStore implements DataStore, DatabaseAware {
+public class DbDataStore implements DataStore {
 
     /**
      * The default value for the minimum object size.
@@ -104,6 +101,11 @@
     public static final int DEFAULT_MIN_RECORD_LENGTH = 100;
 
     /**
+     * The default value for the maximum connections.
+     */
+    public static final int DEFAULT_MAX_CONNECTIONS = 3;
+    
+    /**
      * Write to a temporary file to get the length (slow, but always works).
      * This is the default setting.
      */
@@ -171,6 +173,16 @@
     protected int minRecordLength = DEFAULT_MIN_RECORD_LENGTH;
 
     /**
+     * The maximum number of open connections.
+     */
+    protected int maxConnections = DEFAULT_MAX_CONNECTIONS;
+
+    /**
+     * A list of connections
+     */
+    protected Pool connectionPool;
+
+    /**
      * The prefix for the datastore table, empty by default.
      */
     protected String tablePrefix = "";
@@ -186,11 +198,6 @@
     private boolean schemaCheckEnabled = true;
 
     /**
-     * The logical name of the DataSource to use.
-     */
-    protected String dataSourceName;
-
-    /**
      * This is the property 'table'
      * in the [databaseType].properties file, initialized with the default value.
      */
@@ -290,49 +297,34 @@
     protected List<String> temporaryInUse = Collections.synchronizedList(new ArrayList<String>());
 
     /**
-     * The {@link ConnectionHelper} set in the {@link #init(String)} method.
-     * */
-    protected ConnectionHelper conHelper;
-
-    /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory;
-
-    /**
      * {@inheritDoc}
      */
-    public void setConnectionFactory(ConnectionFactory connnectionFactory) {
-        this.connectionFactory = connnectionFactory;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
     public DataRecord addRecord(InputStream stream) throws DataStoreException {
         ResultSet rs = null;
         TempFileInputStream fileInput = null;
+        ConnectionRecoveryManager conn = getConnection();
         String id = null, tempId = null;
         try {
             long now;
-            while(true) {
+            for (int i = 0; i < ConnectionRecoveryManager.TRIALS; i++) {
                 try {
                     now = System.currentTimeMillis();
                     id = UUID.randomUUID().toString();
                     tempId = TEMP_PREFIX + id;
                     // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
-                    rs = conHelper.exec(selectMetaSQL, new Object[]{tempId}, false, 0);
+                    PreparedStatement prep = conn.executeStmt(selectMetaSQL, new Object[]{tempId});
+                    rs = prep.getResultSet();
                     if (rs.next()) {
                         // re-try in the very, very unlikely event that the row already exists
                         continue;
                     }
                     // INSERT INTO DATASTORE VALUES(?, 0, ?, NULL)
-                    conHelper.exec(insertTempSQL, new Object[]{tempId, new Long(now)});
+                    conn.executeStmt(insertTempSQL, new Object[]{tempId, new Long(now)});
                     break;
                 } catch (Exception e) {
                     throw convert("Can not insert new record", e);
                 } finally {
-                    DbUtility.close(rs);
+                    DatabaseHelper.closeSilently(rs);
                 }
             }
             if (id == null) {
@@ -358,7 +350,7 @@
                 throw new DataStoreException("Unsupported stream store algorithm: " + storeStream);
             }
             // UPDATE DATASTORE SET DATA=? WHERE ID=?
-            conHelper.exec(updateDataSQL, new Object[]{wrapper, tempId});
+            conn.executeStmt(updateDataSQL, new Object[]{wrapper, tempId});
             now = System.currentTimeMillis();
             long length = in.getPosition();
             DataIdentifier identifier = new DataIdentifier(digest.digest());
@@ -367,16 +359,17 @@
             // UPDATE DATASTORE SET ID=?, LENGTH=?, LAST_MODIFIED=?
             // WHERE ID=?
             // AND NOT EXISTS(SELECT ID FROM DATASTORE WHERE ID=?)
-            int count = conHelper.update(updateSQL, new Object[]{
+            PreparedStatement prep = conn.executeStmt(updateSQL, new Object[]{
                     id, new Long(length), new Long(now),
                     tempId, id});
-            rs = null; // prevent that rs.close() is called in finally block if count != 0 (rs is closed above)
+            int count = prep.getUpdateCount();
             if (count == 0) {
                 // update count is 0, meaning such a row already exists
                 // DELETE FROM DATASTORE WHERE ID=?
-                conHelper.exec(deleteSQL, new Object[]{tempId});
+                conn.executeStmt(deleteSQL, new Object[]{tempId});
                 // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
-                rs = conHelper.exec(selectMetaSQL, new Object[]{id}, false, 0);
+                prep = conn.executeStmt(selectMetaSQL, new Object[]{id});
+                rs = prep.getResultSet();
                 if (rs.next()) {
                     long oldLength = rs.getLong(1);
                     long lastModified = rs.getLong(2);
@@ -400,7 +393,8 @@
             if (tempId != null) {
                 temporaryInUse.remove(tempId);
             }
-            DbUtility.close(rs);
+            DatabaseHelper.closeSilently(rs);
+            putBack(conn);
             if (fileInput != null) {
                 try {
                     fileInput.close();
@@ -429,6 +423,7 @@
      * {@inheritDoc}
      */
     public synchronized int deleteAllOlderThan(long min) throws DataStoreException {
+        ConnectionRecoveryManager conn = getConnection();
         try {
             ArrayList<String> touch = new ArrayList<String>();
             ArrayList<DataIdentifier> ids = new ArrayList<DataIdentifier>(inUse.keySet());
@@ -442,9 +437,12 @@
                 updateLastModifiedDate(key, 0);
             }
             // DELETE FROM DATASTORE WHERE LAST_MODIFIED<?
-            return conHelper.update(deleteOlderSQL, new Long[]{new Long(min)});
+            PreparedStatement prep = conn.executeStmt(deleteOlderSQL, new Long[]{new Long(min)});
+            return prep.getUpdateCount();
         } catch (Exception e) {
             throw convert("Can not delete records", e);
+        } finally {
+            putBack(conn);
         }
     }
 
@@ -452,11 +450,13 @@
      * {@inheritDoc}
      */
     public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
+        ConnectionRecoveryManager conn = getConnection();
         ArrayList<DataIdentifier> list = new ArrayList<DataIdentifier>();
         ResultSet rs = null;
         try {
             // SELECT ID FROM DATASTORE
-            rs = conHelper.exec(selectAllSQL, new Object[0], false, 0);
+            PreparedStatement prep = conn.executeStmt(selectAllSQL, new Object[0]);
+            rs = prep.getResultSet();
             while (rs.next()) {
                 String id = rs.getString(1);
                 if (!id.startsWith(TEMP_PREFIX)) {
@@ -468,7 +468,8 @@
         } catch (Exception e) {
             throw convert("Can not read records", e);
         } finally {
-            DbUtility.close(rs);
+            DatabaseHelper.closeSilently(rs);
+            putBack(conn);
         }
     }
 
@@ -493,12 +494,14 @@
      * {@inheritDoc}
      */
     public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException {
+        ConnectionRecoveryManager conn = getConnection();
         usesIdentifier(identifier);
         ResultSet rs = null;
         try {
             String id = identifier.toString();
             // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID = ?
-            rs = conHelper.exec(selectMetaSQL, new Object[]{id}, false, 0);
+            PreparedStatement prep = conn.executeStmt(selectMetaSQL, new Object[]{id});
+            rs = prep.getResultSet();
             if (!rs.next()) {
                 throw new DataStoreException("Record not found: " + identifier);
             }
@@ -509,7 +512,8 @@
         } catch (Exception e) {
             throw convert("Can not read identifier " + identifier, e);
         } finally {
-            DbUtility.close(rs);
+            DatabaseHelper.closeSilently(rs);
+            putBack(conn);
         }
     }
     
@@ -534,29 +538,36 @@
      *          or if the given identifier is invalid
      */    
     InputStream openStream(DbInputStream inputStream, DataIdentifier identifier) throws DataStoreException {
+        ConnectionRecoveryManager conn = null;
         ResultSet rs = null;
         try {
+            conn = getConnection();
             // SELECT ID, DATA FROM DATASTORE WHERE ID = ?
-            rs = conHelper.exec(selectDataSQL, new Object[]{identifier.toString()}, false, 0);
+            PreparedStatement prep = conn.executeStmt(selectDataSQL, new Object[]{identifier.toString()});
+            rs = prep.getResultSet();
             if (!rs.next()) {
                 throw new DataStoreException("Record not found: " + identifier);
             }
             InputStream stream = rs.getBinaryStream(2);
             if (stream == null) {
                 stream = new ByteArrayInputStream(new byte[0]);
-                DbUtility.close(rs);
+                DatabaseHelper.closeSilently(rs);
+                putBack(conn);
             } else if (copyWhenReading) {
                 // If we copy while reading, create a temp file and close the stream
                 File temp = moveToTempFile(stream);
                 stream = new TempFileInputStream(temp);
-                DbUtility.close(rs);
+                DatabaseHelper.closeSilently(rs);
+                putBack(conn);
             } else {
                 stream = new BufferedInputStream(stream);
+                inputStream.setConnection(conn);
                 inputStream.setResultSet(rs);
             }
             return stream;
         } catch (Exception e) {
-            DbUtility.close(rs);
+            DatabaseHelper.closeSilently(rs);
+            putBack(conn);
             throw convert("Retrieving database resource ", e);
         }
     }
@@ -567,67 +578,36 @@
     public synchronized void init(String homeDir) throws DataStoreException {
         try {
             initDatabaseType();
-
-            conHelper = createConnectionHelper(getDataSource());
-
-            if (isSchemaCheckEnabled()) {
-                createCheckSchemaOperation().run();
+            connectionPool = new Pool(this, maxConnections);
+            ConnectionRecoveryManager conn = getConnection();
+            DatabaseMetaData meta = conn.getConnection().getMetaData();
+            log.info("Using JDBC driver " + meta.getDriverName() + " " + meta.getDriverVersion());
+            meta.getDriverVersion();
+            ResultSet rs = meta.getTables(null, null, schemaObjectPrefix + tableSQL, null);
+            boolean exists = rs.next();
+            rs.close();
+            if (!exists && isSchemaCheckEnabled()) {
+                // CREATE TABLE DATASTORE(ID VARCHAR(255) PRIMARY KEY, 
+                // LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB)
+                conn.executeStmt(createTableSQL, null);
             }
+            putBack(conn);
         } catch (Exception e) {
             throw convert("Can not init data store, driver=" + driver + " url=" + url + " user=" + user + 
                     " schemaObjectPrefix=" + schemaObjectPrefix + " tableSQL=" + tableSQL + " createTableSQL=" + createTableSQL, e);
         }
     }
 
-    private DataSource getDataSource() throws Exception {
-        if (getDataSourceName() == null || "".equals(getDataSourceName())) {
-            return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword());
-        } else {
-            return connectionFactory.getDataSource(dataSourceName);
-        }
-    }
-
-    /**
-     * This method is called from the {@link #init(String)} method of this class and returns a
-     * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may
-     * override it to return a specialized connection helper.
-     * 
-     * @param dataSrc the {@link DataSource} of this persistence manager
-     * @return a {@link ConnectionHelper}
-     * @throws Exception on error
-     */
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new ConnectionHelper(dataSrc, false);
-    }
-
-    /**
-     * This method is called from {@link #init(String)} after the
-     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
-     * 
-     * @return a new {@link CheckSchemaOperation} instance
-     */
-    protected final CheckSchemaOperation createCheckSchemaOperation() {
-        String tableName = tablePrefix + schemaObjectPrefix + tableSQL;
-        return new CheckSchemaOperation(conHelper, new ByteArrayInputStream(createTableSQL.getBytes()), tableName);
-    }
-
     protected void initDatabaseType() throws DataStoreException {
-        boolean failIfNotFound = false;
+        boolean failIfNotFound;
         if (databaseType == null) {
-            if (dataSourceName != null) {
-                try {
-                    databaseType = connectionFactory.getDataBaseType(dataSourceName);
-                } catch (RepositoryException e) {
-                    throw new DataStoreException(e);
-                }
-            } else {
-                if (!url.startsWith("jdbc:")) {
-                    return;
-                }
-                int start = "jdbc:".length();
-                int end = url.indexOf(':', start);
-                databaseType = url.substring(start, end);
+            if (!url.startsWith("jdbc:")) {
+                return;
             }
+            failIfNotFound = false;
+            int start = "jdbc:".length();
+            int end = url.indexOf(':', start);
+            databaseType = url.substring(start, end);
         } else {
             failIfNotFound = true;
         }
@@ -742,14 +722,17 @@
         if (lastModified < minModifiedDate) {
             long now = System.currentTimeMillis();
             Long n = new Long(now);
+            ConnectionRecoveryManager conn = getConnection();
             try {
                 // UPDATE DATASTORE SET LAST_MODIFIED = ? WHERE ID = ? AND LAST_MODIFIED < ?
-                conHelper.exec(updateLastModifiedSQL, new Object[]{
+                conn.executeStmt(updateLastModifiedSQL, new Object[]{
                         n, key, n
                 });
                 return now;
             } catch (Exception e) {
                 throw convert("Can not update lastModified", e);
+            } finally {
+                putBack(conn);
             }
         }
         return lastModified;
@@ -866,6 +849,11 @@
      * {@inheritDoc}
      */
     public synchronized void close() throws DataStoreException {
+        ArrayList<ConnectionRecoveryManager> list = connectionPool.getAll();
+        for (ConnectionRecoveryManager conn : list) {
+            conn.close();
+        }
+        list.clear();
     }
 
     protected void usesIdentifier(DataIdentifier identifier) {
@@ -887,27 +875,56 @@
         }
     }
 
+    protected ConnectionRecoveryManager getConnection() throws DataStoreException {
+        try {
+            ConnectionRecoveryManager conn = (ConnectionRecoveryManager) connectionPool.get();
+            conn.setAutoReconnect(true);
+            return conn;
+        } catch (InterruptedException e) {
+            throw new DataStoreException("Interrupted", e);
+        } catch (RepositoryException e) {
+            throw new DataStoreException("Can not open a new connection", e);
+        }
+    }
+
+    protected void putBack(ConnectionRecoveryManager conn) throws DataStoreException {
+        try {
+            connectionPool.add(conn);
+        } catch (InterruptedException e) {
+            throw new DataStoreException("Interrupted", e);
+        }
+    }
+
     /**
      * Get the maximum number of concurrent connections.
      *
-     * @deprecated
      * @return the maximum number of connections.
      */
     public int getMaxConnections() {
-        return -1;
+        return maxConnections;
     }
 
     /**
      * Set the maximum number of concurrent connections in the pool.
      * At least 3 connections are required if the garbage collection process is used.
      *
-     *@deprecated
      * @param maxConnections the new value
      */
     public void setMaxConnections(int maxConnections) {
+        this.maxConnections = maxConnections;
     }
 
     /**
+     * Create a new connection.
+     *
+     * @return the new connection
+     */
+    public ConnectionRecoveryManager createNewConnection() throws RepositoryException {
+        ConnectionRecoveryManager conn = new ConnectionRecoveryManager(false, driver, url, user, password);
+        return conn;
+    }
+
+    /**
      * Is a stream copied to a temporary file before returning?
      *
      * @return the setting
@@ -966,11 +983,4 @@
         this.schemaObjectPrefix = schemaObjectPrefix;
     }    
 
-    public String getDataSourceName() {
-        return dataSourceName;
-    }
-
-    public void setDataSourceName(String dataSourceName) {
-        this.dataSourceName = dataSourceName;
-    }
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java	(.../trunk)	(revision 827970)
@@ -16,13 +16,14 @@
  */
 package org.apache.jackrabbit.core.data.db;
 
+import java.sql.DriverManager;
 import java.sql.SQLException;
 
-import javax.sql.DataSource;
-
 import org.apache.jackrabbit.core.data.DataStoreException;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper;
+import org.apache.jackrabbit.core.persistence.bundle.DerbyPersistenceManager;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionRecoveryManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The Derby data store closes the database when the data store is closed
@@ -31,22 +32,49 @@
 public class DerbyDataStore extends DbDataStore {
 
     /**
-     * {@inheritDoc}
+     * Logger instance
      */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new DerbyConnectionHelper(dataSrc, false);
-    }
+    private static Logger log = LoggerFactory.getLogger(DerbyDataStore.class);
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
     public synchronized void close() throws DataStoreException {
         super.close();
+
+        // check for embedded driver
+        if (!DerbyPersistenceManager.DERBY_EMBEDDED_DRIVER.equals(getDriver())) {
+            return;
+        }
+
         try {
-            ((DerbyConnectionHelper) conHelper).shutDown(getDriver());
-        } catch (SQLException e) {
+
+            // prepare connection url for issuing shutdown command
+            ConnectionRecoveryManager connectionManager = getConnection();
+
+            String url = connectionManager.getConnection().getMetaData().getURL();
+            int pos = url.lastIndexOf(';');
+            if (pos != -1) {
+                // strip any attributes from connection url
+                url = url.substring(0, pos);
+            }
+            url += ";shutdown=true";
+
+            // we have to reset the connection to 'autoCommit=true' before closing it;
+            // otherwise Derby would mysteriously complain about some pending uncommitted
+            // changes which can't possibly be true.
+            // @todo further investigate
+            connectionManager.getConnection().setAutoCommit(true);
+
+            // need to call it again because we just opened a connection,
+            // and super.close() closes it.
+            super.close();
+
+            // now it's safe to shutdown the embedded Derby database
+            try {
+                DriverManager.getConnection(url);
+            } catch (SQLException e) {
+                // a shutdown command always raises a SQLException
+                log.info(e.getMessage());
+            }
+        } catch (Exception e) {
             throw new DataStoreException(e);
         }
     }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java	(.../trunk)	(revision 827970)
@@ -284,7 +284,7 @@
         boolean succeeded = false;
         try {
             this.repConfig = repConfig;
-            
+
             // setup file systems
             repStore = repConfig.getFileSystem();
             String fsRootPath = "/meta";
@@ -1193,8 +1193,6 @@
             log.warn("Interrupted while waiting for background threads", e);
         }
 
-        repConfig.getConnectionFactory().close();
-
         // finally release repository lock
         if (repLock != null) {
             try {
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java	(.../trunk)	(revision 827970)
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.jackrabbit.core.config;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
-
-import javax.naming.Context;
-
-/**
- * This class contains list of definitions for {@code DataSource} instances.
- */
-public class DataSourceConfig {
-
-    public static final String LOGICAL_NAME = "logicalName";
-
-    public static final String DRIVER = "driver";
-
-    public static final String URL = "url";
-
-    public static final String USER = "user";
-
-    public static final String PASSWORD = "password";
-
-    public static final String DB_TYPE = "databaseType";
-
-    public static final String VALIDATION_QUERY = "validationQuery";
-
-    public static final String MAX_POOL_SIZE = "maxPoolSize";
-
-    private final List<DataSourceDefinition> defs = new ArrayList<DataSourceDefinition>();
-
-    /**
-     * Adds a DataSourceDefinition from the given properties.
-     * 
-     * @param props the properties (key and values must be strings)
-     * @throws ConfigurationException on error
-     */
-    public void addDataSourceDefinition(Properties props) throws ConfigurationException {
-        DataSourceDefinition def = new DataSourceDefinition(props);
-        for (DataSourceDefinition existing : defs) {
-            if (existing.getLogicalName().equals(def.getLogicalName())) {
-                throw new ConfigurationException("Duplicate logicalName for a DataSource: "
-                        + def.getLogicalName());
-            }
-        }
-        defs.add(def);
-    }
-
-    /**
-     * @return the unmodifiable list of the current {@link DataSourceDefinition}s
-     */
-    public List<DataSourceDefinition> getDefinitions() {
-        return Collections.unmodifiableList(defs);
-    }
-
-    /**
-     * The definition of a DataSource. 
-     */
-    public static final class DataSourceDefinition {
-
-        private static final List<String> allPropNames =
-            Arrays
-                .asList(LOGICAL_NAME, DRIVER, URL, USER, PASSWORD, DB_TYPE, VALIDATION_QUERY, MAX_POOL_SIZE);
-
-        private static final List<String> allJndiPropNames =
-            Arrays.asList(LOGICAL_NAME, DRIVER, URL, USER, PASSWORD, DB_TYPE);
-
-        private final String logicalName;
-
-        private final String driver;
-
-        private final String url;
-
-        private final String user;
-
-        private final String password;
-
-        private final String dbType;
-
-        private final String validationQuery;
-
-        private final int maxPoolSize;
-
-        /**
-         * Creates a DataSourceDefinition from the given properties and 
-         * throws a {@link ConfigurationException} when the set of properties does not
-         * satisfy some validity constraints.
-         * 
-         * @param props the properties (string keys and values)
-         * @throws ConfigurationException on error
-         */
-        public DataSourceDefinition(Properties props) throws ConfigurationException {
-            this.logicalName = (String) props.getProperty(LOGICAL_NAME);
-            this.driver = (String) props.getProperty(DRIVER);
-            this.url = (String) props.getProperty(URL);
-            this.user = (String) props.getProperty(USER);
-            this.password = (String) props.getProperty(PASSWORD);
-            this.dbType = (String) props.getProperty(DB_TYPE);
-            this.validationQuery = (String) props.getProperty(VALIDATION_QUERY);
-            try {
-                this.maxPoolSize = Integer.parseInt((String) props.getProperty(MAX_POOL_SIZE, "-1"));
-            } catch (NumberFormatException e) {
-                throw new ConfigurationException("failed to parse " + MAX_POOL_SIZE
-                        + " property for DataSource " + logicalName);
-            }
-            verify(props);
-        }
-
-        private void verify(Properties props) throws ConfigurationException {
-            // Check required properties
-            if (logicalName == null || "".equals(logicalName)) {
-                throw new ConfigurationException("DataSource logical name must not be null or empty");
-            }
-            if (driver == null || "".equals(driver)) {
-                throw new ConfigurationException("DataSource driver must not be null or empty");
-            }
-            if (url == null || "".equals(url)) {
-                throw new ConfigurationException("DataSource URL must not be null or empty");
-            }
-            if (dbType == null || "".equals(dbType)) {
-                throw new ConfigurationException("DataSource databaseType must not be null or empty");
-            }
-            // Check unknown properties
-            for (Object propName : props.keySet()) {
-                if (!allPropNames.contains((String) propName)) {
-                    throw new ConfigurationException("Unknown DataSource property: " + propName);
-                }
-            }
-            // Check JNDI config:
-            if (isJndiConfig()) {
-                for (Object propName : props.keySet()) {
-                    if (!allJndiPropNames.contains((String) propName)) {
-                        throw new ConfigurationException("Property " + propName
-                                + " is not allowed for a DataSource obtained through JNDI"
-                                + ", DataSource logicalName = " + logicalName);
-                    }
-                }
-            }
-        }
-
-        private boolean isJndiConfig() throws ConfigurationException {
-            Class<?> driverClass = null;
-            try {
-                if (driver.length() > 0) {
-                    driverClass = Class.forName(driver);
-                }
-            } catch (ClassNotFoundException e) {
-                throw new ConfigurationException("Could not load JDBC driver class " + driver, e);
-            }
-            return driverClass != null && Context.class.isAssignableFrom(driverClass);
-        }
-
-        /**
-         * @return the logicalName
-         */
-        public String getLogicalName() {
-            return logicalName;
-        }
-
-        /**
-         * @return the driver
-         */
-        public String getDriver() {
-            return driver;
-        }
-
-        /**
-         * @return the url
-         */
-        public String getUrl() {
-            return url;
-        }
-
-        /**
-         * @return the user
-         */
-        public String getUser() {
-            return user;
-        }
-
-        /**
-         * @return the dbType
-         */
-        public String getDbType() {
-            return dbType;
-        }
-
-        /**
-         * @return the password
-         */
-        public String getPassword() {
-            return password;
-        }
-
-        /**
-         * @return the validationQuery
-         */
-        public String getValidationQuery() {
-            return validationQuery;
-        }
-
-        /**
-         * @return the maxPoolSize
-         */
-        public int getMaxPoolSize() {
-            return maxPoolSize;
-        }
-    }
-}
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfig.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfig.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfig.java	(.../trunk)	(revision 827970)
@@ -29,7 +29,6 @@
 import org.apache.jackrabbit.core.query.QueryHandlerFactory;
 import org.apache.jackrabbit.core.util.RepositoryLockMechanism;
 import org.apache.jackrabbit.core.util.RepositoryLockMechanismFactory;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Element;
@@ -258,7 +257,7 @@
         variables.setProperty(
                 RepositoryConfigurationParser.REPOSITORY_HOME_VARIABLE, home);
         RepositoryConfigurationParser parser =
-            new RepositoryConfigurationParser(variables, new ConnectionFactory());
+            new RepositoryConfigurationParser(variables);
 
         RepositoryConfig config = parser.parseRepositoryConfig(xml);
         config.init();
@@ -350,16 +349,6 @@
     private final RepositoryLockMechanismFactory rlf;
 
     /**
-     * The configuration for the used DataSources.
-     */
-    private final DataSourceConfig dsc;
-
-    /**
-     * The {@link ConnectionFactory}
-     */
-    private final ConnectionFactory cf;
-
-    /**
      * Creates a repository configuration object.
      *
      * @param home repository home directory
@@ -374,9 +363,6 @@
      * @param qhf query handler factory for the system search manager
      * @param cc optional cluster configuration
      * @param dsf data store factory
-     * @param rlf the RepositoryLockMechanismFactory
-     * @param dsc the DataSource configuration
-     * @param cf the ConnectionFactory for all DatabasAware beans
      * @param parser configuration parser
      */
     public RepositoryConfig(
@@ -386,8 +372,6 @@
             Element template, VersioningConfig vc, QueryHandlerFactory qhf,
             ClusterConfig cc, DataStoreFactory dsf,
             RepositoryLockMechanismFactory rlf,
-            DataSourceConfig dsc,
-            ConnectionFactory cf,
             RepositoryConfigurationParser parser) {
         workspaces = new HashMap<String, WorkspaceConfig>();
         this.home = home;
@@ -403,8 +387,6 @@
         this.cc = cc;
         this.dsf = dsf;
         this.rlf = rlf;
-        this.dsc = dsc;
-        this.cf = cf;
         this.parser = parser;
     }
 
@@ -417,15 +399,6 @@
      *                               been initialized
      */
     public void init() throws ConfigurationException, IllegalStateException {
-        
-        // This needs to be done here and not by clients (e.g., RepositoryImpl ctor) because
-        // fsf is used below and this might be a DatabaseAware FileSystem
-        try {
-            cf.registerDataSources(dsc);
-        } catch (RepositoryException e) {
-            throw new ConfigurationException("failed to register data sources", e);
-        }
-
         if (!workspaces.isEmpty()) {
             throw new IllegalStateException(
                     "Repository configuration has already been initialized.");
@@ -942,14 +915,6 @@
     }
 
     /**
-     * Returns the {@link ConnectionFactory} for this repository.
-     * Please note that it must be closed explicitly.
-     */
-    public ConnectionFactory getConnectionFactory() {
-        return cf;
-    }
-
-    /**
      * Creates and returns the configured data store. Returns
      * <code>null</code> if a data store has not been configured.
      *
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/RepositoryConfigurationParser.java	(.../trunk)	(revision 827970)
@@ -36,7 +36,6 @@
 import org.apache.jackrabbit.core.util.RepositoryLock;
 import org.apache.jackrabbit.core.util.RepositoryLockMechanism;
 import org.apache.jackrabbit.core.util.RepositoryLockMechanismFactory;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
 import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
@@ -117,12 +116,6 @@
     /** Name of the cluster configuration element. */
     public static final String CLUSTER_ELEMENT = "Cluster";
 
-    /** Name of the data source configuration element. */
-    public static final String DATASOURCES_ELEMENT = "DataSources";
-
-    /** Name of the data source configuration element. */
-    public static final String DATASOURCE_ELEMENT = "DataSource";
-
     /** Name of the journal configuration element. */
     public static final String JOURNAL_ELEMENT = "Journal";
 
@@ -188,11 +181,6 @@
     private static final String AC_PROVIDER_ELEMENT = "AccessControlProvider";
 
     /**
-     * The repositories {@link ConnectionFactory}. 
-     */
-    private final ConnectionFactory connectionFactory;
-
-    /**
      * Name of the cluster node id file.
      */
     private static final String CLUSTER_NODE_ID_FILE = "cluster_node.id";
@@ -202,9 +190,8 @@
      *
      * @param variables parser variables
      */
-    public RepositoryConfigurationParser(Properties variables, ConnectionFactory connectionFactory) {
+    public RepositoryConfigurationParser(Properties variables) {
         super(variables);
-        this.connectionFactory = connectionFactory;
     }
 
     /**
@@ -302,36 +289,12 @@
 
         RepositoryLockMechanismFactory rlf = getRepositoryLockMechanismFactory(root);
 
-        // Optional data source configuration
-        DataSourceConfig dsc = parseDataSourceConfig(root);
-
         return new RepositoryConfig(home, securityConfig, fsf,
                 workspaceDirectory, workspaceConfigDirectory, defaultWorkspace,
-                maxIdleTime, template, vc, qhf, cc, dsf, rlf, dsc, connectionFactory, this);
+                maxIdleTime, template, vc, qhf, cc, dsf, rlf, this);
     }
 
-    
     /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected BeanConfig parseBeanConfig(Element parent, String name) throws ConfigurationException {
-        BeanConfig cfg = super.parseBeanConfig(parent, name);
-        cfg.setConnectionFactory(connectionFactory);
-        return cfg;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected BeanConfig parseBeanConfig(Element element) throws ConfigurationException {
-        BeanConfig cfg = super.parseBeanConfig(element);
-        cfg.setConnectionFactory(connectionFactory);
-        return cfg;
-    }
-
-    /**
      * Parses security configuration. Security configuration
      * uses the following format:
      * <pre>
@@ -825,38 +788,6 @@
     }
 
     /**
-     * TODO
-     * 
-     * @param parent
-     * @return
-     * @throws ConfigurationException
-     */
-    protected DataSourceConfig parseDataSourceConfig(Element parent)
-            throws ConfigurationException {
-        DataSourceConfig dsc = new DataSourceConfig();
-        NodeList children = parent.getChildNodes();
-        for (int i = 0; i < children.getLength(); i++) {
-            Node child = children.item(i);
-            if (child.getNodeType() == Node.ELEMENT_NODE
-                    && DATASOURCES_ELEMENT.equals(child.getNodeName())) {
-                Element element = (Element) child;
-                NodeList children2 = element.getChildNodes();
-                // Process the DataSource entries:
-                for (int j = 0; j < children2.getLength(); j++) {
-                    Node child2 = children2.item(j);
-                    if (child2.getNodeType() == Node.ELEMENT_NODE
-                            && DATASOURCE_ELEMENT.equals(child2.getNodeName())) {
-                        Element dsdef = (Element) child2;
-                        Properties props = parseParameters(dsdef);
-                        dsc.addDataSourceDefinition(props);
-                    }
-                }
-            }
-        }
-        return dsc;
-    }
-
-    /**
      * Parses data store configuration. Data store configuration uses the following format:
      * <pre>
      *   &lt;DataStore class="..."&gt;
@@ -950,7 +881,7 @@
 
     /**
      * Creates a new instance of a configuration parser but with overlayed
-     * variables and the same connection factory as this parser.
+     * variables.
      *
      * @param variables the variables overlay
      * @return a new configuration parser instance
@@ -959,7 +890,7 @@
         // overlay the properties
         Properties props = new Properties(getVariables());
         props.putAll(variables);
-        return new RepositoryConfigurationParser(props, connectionFactory);
+        return new RepositoryConfigurationParser(props);
     }
 
     /**
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/BeanConfig.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/BeanConfig.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/BeanConfig.java	(.../trunk)	(revision 827970)
@@ -17,8 +17,6 @@
 package org.apache.jackrabbit.core.config;
 
 import org.apache.commons.collections.BeanMap;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -80,11 +78,6 @@
     private final Properties properties;
 
     /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory = null;
-
-    /**
      * Flag to validate the configured bean property names against
      * the configured bean class. By default this is <code>true</code>
      * to prevent incorrect property names. However, in some cases this
@@ -122,7 +115,6 @@
      */
     public BeanConfig(BeanConfig config) {
         this(config.getClassName(), config.getParameters());
-        setConnectionFactory(config.connectionFactory);
     }
 
     /**
@@ -136,14 +128,6 @@
     }
 
     /**
-     * @param connectionFactory the {@link ConnectionFactory} to inject (if possible) in the
-     *            {@link #newInstance()} method
-     */
-    public void setConnectionFactory(ConnectionFactory connectionFactory) {
-        this.connectionFactory = connectionFactory;
-    }
-
-    /**
      * Returns the class name of the configured bean.
      *
      * @return class name of the bean
@@ -183,10 +167,6 @@
                 }
             }
 
-            if (object instanceof DatabaseAware) {
-                ((DatabaseAware) object).setConnectionFactory(connectionFactory);
-            }
-
             if (validate) {
                 // Check that no invalid property names were configured
                 for (Object key : properties.keySet()) {
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/DatabaseJournal.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/DatabaseJournal.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/DatabaseJournal.java	(.../trunk)	(revision 827970)
@@ -16,28 +16,26 @@
  */
 package org.apache.jackrabbit.core.journal;
 
-import org.apache.commons.io.IOUtils;
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionFactory;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.DatabaseAware;
-import org.apache.jackrabbit.core.util.db.DbUtility;
-import org.apache.jackrabbit.core.util.db.StreamWrapper;
+import org.apache.jackrabbit.core.persistence.bundle.util.ConnectionFactory;
 import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver;
+import org.apache.jackrabbit.util.Text;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.BufferedReader;
-import java.io.ByteArrayInputStream;
 import java.io.File;
+import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.Calendar;
 
 import javax.jcr.RepositoryException;
-import javax.sql.DataSource;
 
 /**
  * Database-based journal implementation. Stores records inside a database table named
@@ -78,9 +76,20 @@
  * </pre> *
  * </ul>
  */
-public class DatabaseJournal extends AbstractJournal implements DatabaseAware {
+public class DatabaseJournal extends AbstractJournal {
 
     /**
+     * Schema object prefix.
+     */
+    private static final String SCHEMA_OBJECT_PREFIX_VARIABLE =
+            "${schemaObjectPrefix}";
+
+    /**
+     * Default DDL script name.
+     */
+    private static final String DEFAULT_DDL_NAME = "default.ddl";
+
+    /**
      * Default journal table name, used to check schema completeness.
      */
     private static final String DEFAULT_JOURNAL_TABLE = "JOURNAL";
@@ -91,6 +100,11 @@
     private static final String LOCAL_REVISIONS_TABLE = "LOCAL_REVISIONS";
 
     /**
+     * Default reconnect delay in milliseconds.
+     */
+    private static final long DEFAULT_RECONNECT_DELAY_MS = 10000;
+
+    /**
      * Logger.
      */
     private static Logger log = LoggerFactory.getLogger(DatabaseJournal.class);
@@ -121,16 +135,61 @@
     private String password;
 
     /**
-     * DataSource logical name, bean property.
+     * Reconnect delay in milliseconds, bean property.
      */
-    private String dataSourceName;
+    private long reconnectDelayMs;
 
     /**
-     * The connection helper
+     * JDBC Connection used.
      */
-    private ConnectionHelper conHelper;
+    private Connection connection;
 
     /**
+     * Statement returning all revisions within a range.
+     */
+    private PreparedStatement selectRevisionsStmt;
+
+    /**
+     * Statement updating the global revision.
+     */
+    private PreparedStatement updateGlobalStmt;
+
+    /**
+     * Statement returning the global revision.
+     */
+    private PreparedStatement selectGlobalStmt;
+
+    /**
+     * Statement appending a new record.
+     */
+    private PreparedStatement insertRevisionStmt;
+
+    /**
+     * Statement returning the minimum of the local revisions.
+     */
+    private PreparedStatement selectMinLocalRevisionStmt;
+
+    /**
+     * Statement removing a set of revisions from the journal table.
+     */
+    private PreparedStatement cleanRevisionStmt;
+
+    /**
+     * Statement returning the local revision of this cluster node.
+     */
+    private PreparedStatement getLocalRevisionStmt;
+
+    /**
+     * Statement for inserting the local revision of this cluster node.
+     */
+    private PreparedStatement insertLocalRevisionStmt;
+
+    /**
+     * Statement for updating the local revision of this cluster node.
+     */
+    private PreparedStatement updateLocalRevisionStmt;
+
+    /**
      * Auto commit level.
      */
     private int lockLevel;
@@ -141,9 +200,14 @@
     private long lockedRevision;
 
     /**
+     * Next time in milliseconds to reattempt connecting to the database.
+     */
+    private long reconnectTimeMs;
+
+    /**
      * Whether the revision table janitor thread is enabled.
      */
-    private boolean janitorEnabled = false;
+    private boolean janitorEnabled;
 
     /**
      * The sleep time of the revision table janitor in seconds, 1 day default.
@@ -176,7 +240,6 @@
      * The instance that manages the local revision.
      */
     private DatabaseRevision databaseRevision;
-
     /**
      * SQL statement returning all revisions within a range.
      */
@@ -206,19 +269,19 @@
      * SQL statement removing a set of revisions with from the journal table.
      */
     protected String cleanRevisionStmtSQL;
-    
+
     /**
      * SQL statement returning the local revision of this cluster node.
      */
     protected String getLocalRevisionStmtSQL;
-    
+
     /**
-     * SQL statement for inserting the local revision of this cluster node. 
+     * SQL statement for inserting the local revision of this cluster node.
      */
     protected String insertLocalRevisionStmtSQL;
 
     /**
-     * SQL statement for updating the local revision of this cluster node. 
+     * SQL statement for updating the local revision of this cluster node.
      */
     protected String updateLocalRevisionStmtSQL;
 
@@ -228,49 +291,36 @@
     protected String schemaObjectPrefix;
 
     /**
-     * The repositories {@link ConnectionFactory}.
-     */
-    private ConnectionFactory connectionFactory;
-
-    public DatabaseJournal() {
-        databaseType = "default";
-        schemaObjectPrefix = "";
-    }
-
-    /**
      * {@inheritDoc}
      */
-    public void setConnectionFactory(ConnectionFactory connnectionFactory) {
-        this.connectionFactory = connnectionFactory;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
     public void init(String id, NamespaceResolver resolver)
             throws JournalException {
 
         super.init(id, resolver);
 
+        // Provide valid defaults for arguments
+        if (schemaObjectPrefix == null) {
+            schemaObjectPrefix = "";
+        }
+        if (reconnectDelayMs == 0) {
+            reconnectDelayMs = DEFAULT_RECONNECT_DELAY_MS;
+        }
+
         init();
 
         try {
-            conHelper = createConnectionHelper(getDataSource());
-
-            // make sure schemaObjectPrefix consists of legal name characters only
-            schemaObjectPrefix = conHelper.prepareDbIdentifier(schemaObjectPrefix);
-
-            // check if schema objects exist and create them if necessary
+            connection = getConnection();
+            setAutoCommit(connection, true);
             if (isSchemaCheckEnabled()) {
-                createCheckSchemaOperation().run();
+                checkSchema();
             }
-
             // Make sure that the LOCAL_REVISIONS table exists (see JCR-1087)
             if (isSchemaCheckEnabled()) {
                 checkLocalRevisionSchema();
             }
 
             buildSQLStatements();
+            prepareStatements();
             initInstanceRevisionAndJanitor();
         } catch (Exception e) {
             String msg = "Unable to create connection.";
@@ -279,41 +329,7 @@
         log.info("DatabaseJournal initialized.");
     }
 
-    private DataSource getDataSource() throws Exception {
-        if (getDataSourceName() == null || "".equals(getDataSourceName())) {
-            return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword());
-        } else {
-            return connectionFactory.getDataSource(dataSourceName);
-        }
-    }
-
     /**
-     * This method is called from the {@link #init(String, NamespaceResolver)} method of this class and
-     * returns a {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field.
-     * Subclasses may override it to return a specialized connection helper.
-     * 
-     * @param dataSrc the {@link DataSource} of this persistence manager
-     * @return a {@link ConnectionHelper}
-     * @throws Exception on error
-     */
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        return new ConnectionHelper(dataSrc, false);
-    }
-
-    /**
-     * This method is called from {@link #init(String, NamespaceResolver)} after the
-     * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}.
-     * Subclasses can overrride this implementation to get a customized implementation.
-     * 
-     * @return a new {@link CheckSchemaOperation} instance
-     */
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        InputStream in = DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
-        return new CheckSchemaOperation(conHelper, in, schemaObjectPrefix + DEFAULT_JOURNAL_TABLE).addVariableReplacement(
-            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
-    }
-
-    /**
      * Completes initialization of this database journal. Base implementation
      * checks whether the required bean properties <code>driver</code> and
      * <code>url</code> have been specified and optionally deduces a valid
@@ -324,24 +340,15 @@
      * @throws JournalException if initialization fails
      */
     protected void init() throws JournalException {
-        if (driver == null && dataSourceName == null) {
+        if (driver == null) {
             String msg = "Driver not specified.";
             throw new JournalException(msg);
         }
-        if (url == null && dataSourceName == null) {
+        if (url == null) {
             String msg = "Connection URL not specified.";
             throw new JournalException(msg);
         }
-        if (dataSourceName != null) {
-            try {
-                String configuredDatabaseType = connectionFactory.getDataBaseType(dataSourceName);
-                if (DatabaseJournal.class.getResourceAsStream(configuredDatabaseType + ".ddl") != null) {
-                    setDatabaseType(configuredDatabaseType);
-                }
-            } catch (RepositoryException e) {
-                throw new JournalException("failed to get database type", e);
-            }
-        }
+
         if (databaseType == null) {
             try {
                 databaseType = getDatabaseTypeFromURL(url);
@@ -350,6 +357,13 @@
                 throw new JournalException(msg);
             }
         }
+
+        try {
+            Class.forName(driver);
+        } catch (ClassNotFoundException e) {
+            String msg = "Unable to load driver class.";
+            throw new JournalException(msg, e);
+        }
     }
 
     /**
@@ -392,6 +406,27 @@
     }
 
     /**
+     * Creates a new database connection. This method is called inside
+     * {@link #init(String, org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver)} or
+     * when a connection has been dropped and must be reacquired. Base
+     * implementation uses <code>java.sql.DriverManager</code> to get the
+     * connection. May be overridden by subclasses.
+     *
+     * @see #init()
+     * @return new connection
+     * @throws JournalException if the driver could not be loaded
+     * @throws SQLException if the connection could not be established
+     */
+    protected Connection getConnection() throws SQLException, JournalException {
+        try {
+            return ConnectionFactory.getConnection(driver, url, user, password);
+        } catch (RepositoryException e) {
+            String msg = "Unable to load driver class.";
+            throw new JournalException(msg, e);
+        }
+    }
+
+    /**
      * Derive a database type from a JDBC connection URL. This simply treats the given URL
      * as delimeted by colons and takes the 2nd field.
      *
@@ -413,12 +448,24 @@
     /**
      * {@inheritDoc}
      */
-    public RecordIterator getRecords(long startRevision) throws JournalException {
+    public RecordIterator getRecords(long startRevision)
+            throws JournalException {
+
         try {
-            return new DatabaseRecordIterator(conHelper.exec(selectRevisionsStmtSQL, new Object[]{new Long(
-                    startRevision)}, false, 0), getResolver(), getNamePathResolver());
+            checkConnection();
+
+            selectRevisionsStmt.clearParameters();
+            selectRevisionsStmt.clearWarnings();
+            selectRevisionsStmt.setLong(1, startRevision);
+            selectRevisionsStmt.execute();
+
+            return new DatabaseRecordIterator(
+                    selectRevisionsStmt.getResultSet(), getResolver(), getNamePathResolver());
         } catch (SQLException e) {
-            throw new JournalException("Unable to return record iterator.", e);
+            close(true);
+
+            String msg = "Unable to return record iterator.";
+            throw new JournalException(msg, e);
         }
     }
 
@@ -427,10 +474,20 @@
      */
     public RecordIterator getRecords() throws JournalException {
         try {
-            return new DatabaseRecordIterator(conHelper.exec(selectRevisionsStmtSQL, new Object[]{new Long(
-                    Long.MIN_VALUE)}, false, 0), getResolver(), getNamePathResolver());
+            checkConnection();
+
+            selectRevisionsStmt.clearParameters();
+            selectRevisionsStmt.clearWarnings();
+            selectRevisionsStmt.setLong(1, Long.MIN_VALUE);
+            selectRevisionsStmt.execute();
+
+            return new DatabaseRecordIterator(
+                    selectRevisionsStmt.getResultSet(), getResolver(), getNamePathResolver());
         } catch (SQLException e) {
-            throw new JournalException("Unable to return record iterator.", e);
+            close(true);
+
+            String msg = "Unable to return record iterator.";
+            throw new JournalException(msg, e);
         }
     }
 
@@ -447,16 +504,27 @@
         boolean succeeded = false;
 
         try {
+            checkConnection();
             if (lockLevel++ == 0) {
-                conHelper.startBatch();
+                setAutoCommit(connection, false);
             }
         } catch (SQLException e) {
-            throw new JournalException("Unable to set autocommit to false.", e);
+            close(true);
+
+            String msg = "Unable to set autocommit to false.";
+            throw new JournalException(msg, e);
         }
 
         try {
-            conHelper.exec(updateGlobalStmtSQL);
-            rs = conHelper.exec(selectGlobalStmtSQL, null, false, 0);
+            updateGlobalStmt.clearParameters();
+            updateGlobalStmt.clearWarnings();
+            updateGlobalStmt.execute();
+
+            selectGlobalStmt.clearParameters();
+            selectGlobalStmt.clearWarnings();
+            selectGlobalStmt.execute();
+
+            rs = selectGlobalStmt.getResultSet();
             if (!rs.next()) {
                  throw new JournalException("No revision available.");
             }
@@ -464,9 +532,12 @@
             succeeded = true;
 
         } catch (SQLException e) {
-            throw new JournalException("Unable to lock global revision table.", e);
+            close(true);
+
+            String msg = "Unable to lock global revision table.";
+            throw new JournalException(msg, e);
         } finally {
-            DbUtility.close(rs);
+            close(rs);
             if (!succeeded) {
                 doUnlock(false);
             }
@@ -478,11 +549,12 @@
      */
     protected void doUnlock(boolean successful) {
         if (--lockLevel == 0) {
-            try {
-                conHelper.endBatch(successful);;
-            } catch (SQLException e) {
-                log.error("failed to end batch", e);
+            if (successful) {
+                commit(connection);
+            } else {
+                rollback(connection);
             }
+            setAutoCommit(connection, true);
         }
     }
 
@@ -504,10 +576,19 @@
             throws JournalException {
 
         try {
-            conHelper.exec(insertRevisionStmtSQL, record.getRevision(), getId(), record.getProducerId(),
-                new StreamWrapper(in, length));
+            checkConnection();
 
+            insertRevisionStmt.clearParameters();
+            insertRevisionStmt.clearWarnings();
+            insertRevisionStmt.setLong(1, record.getRevision());
+            insertRevisionStmt.setString(2, getId());
+            insertRevisionStmt.setString(3, record.getProducerId());
+            insertRevisionStmt.setBinaryStream(4, in, length);
+            insertRevisionStmt.execute();
+
         } catch (SQLException e) {
+            close(true);
+
             String msg = "Unable to append revision " + lockedRevision + ".";
             throw new JournalException(msg, e);
         }
@@ -517,43 +598,313 @@
      * {@inheritDoc}
      */
     public void close() {
+        close(false);
         if (janitorThread != null) {
             janitorThread.interrupt();
         }
     }
 
     /**
+     * Close database connections and statements. If closing was due to an
+     * error that occurred, calculates the next time a reconnect should
+     * be attempted.
+     *
+     * @param failure whether closing is due to a failure
+     */
+    private void close(boolean failure) {
+        if (failure) {
+            reconnectTimeMs = System.currentTimeMillis() + reconnectDelayMs;
+        }
+
+        close(selectRevisionsStmt);
+        selectRevisionsStmt = null;
+        close(updateGlobalStmt);
+        updateGlobalStmt = null;
+        close(selectGlobalStmt);
+        selectGlobalStmt = null;
+        close(insertRevisionStmt);
+        insertRevisionStmt = null;
+        close(selectMinLocalRevisionStmt);
+        selectMinLocalRevisionStmt = null;
+        close(cleanRevisionStmt);
+        cleanRevisionStmt = null;
+        close(getLocalRevisionStmt);
+        getLocalRevisionStmt = null;
+        close(insertLocalRevisionStmt);
+        insertLocalRevisionStmt = null;
+        close(updateLocalRevisionStmt);
+        updateLocalRevisionStmt = null;
+
+        close(connection);
+        connection = null;
+    }
+
+    /**
+     * Set the autocommit flag of a connection. Does nothing if the connection
+     * passed is <code>null</code> and logs any exception as warning.
+     *
+     * @param connection database connection
+     * @param autoCommit where to enable or disable autocommit
+     */
+    private static void setAutoCommit(Connection connection, boolean autoCommit) {
+        if (connection != null) {
+            try {
+                // JCR-1013: Setter may fail on a managed connection
+                if (connection.getAutoCommit() != autoCommit) {
+                    connection.setAutoCommit(autoCommit);
+                }
+            } catch (SQLException e) {
+                String msg = "Unable to set autocommit flag to " + autoCommit;
+                log.warn(msg, e);
+            }
+        }
+    }
+
+    /**
+     * Commit a connection. Does nothing if the connection passed is
+     * <code>null</code> and logs any exception as warning.
+     *
+     * @param connection connection.
+     */
+    private static void commit(Connection connection) {
+        if (connection != null) {
+            try {
+                connection.commit();
+            } catch (SQLException e) {
+                String msg = "Error while committing connection: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Rollback a connection. Does nothing if the connection passed is
+     * <code>null</code> and logs any exception as warning.
+     *
+     * @param connection connection.
+     */
+    private static void rollback(Connection connection) {
+        if (connection != null) {
+            try {
+                connection.rollback();
+            } catch (SQLException e) {
+                String msg = "Error while rolling back connection: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Closes the given database connection. Does nothing if the connection
+     * passed is <code>null</code> and logs any exception as warning.
+     *
+     * @param connection database connection
+     */
+    private static void close(Connection connection) {
+        if (connection != null) {
+            try {
+                connection.close();
+            } catch (SQLException e) {
+                String msg = "Error while closing connection: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Close some input stream.  Does nothing if the input stream
+     * passed is <code>null</code> and logs any exception as warning.
+     *
+     * @param in input stream, may be <code>null</code>.
+     */
+    private static void close(InputStream in) {
+        if (in != null) {
+            try {
+                in.close();
+            } catch (IOException e) {
+                String msg = "Error while closing input stream: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Close some statement.  Does nothing if the statement
+     * passed is <code>null</code> and logs any exception as warning.
+     *
+     * @param stmt statement, may be <code>null</code>.
+     */
+    private static void close(Statement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.close();
+            } catch (SQLException e) {
+                String msg = "Error while closing statement: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Close some resultset.  Does nothing if the result set
+     * passed is <code>null</code> and logs any exception as warning.
+     *
+     * @param rs resultset, may be <code>null</code>.
+     */
+    private static void close(ResultSet rs) {
+        if (rs != null) {
+            try {
+                rs.close();
+            } catch (SQLException e) {
+                String msg = "Error while closing result set: " + e.getMessage();
+                log.warn(msg);
+            }
+        }
+    }
+
+    /**
+     * Checks the currently established connection. If the connection no longer
+     * exists, waits until at least <code>reconnectDelayMs</code> milliseconds
+     * have passed since the error occurred and recreates the connection.
+     */
+    private void checkConnection() throws SQLException, JournalException {
+        if (connection == null) {
+            long delayMs = reconnectTimeMs - System.currentTimeMillis();
+            if (delayMs > 0) {
+                try {
+                    Thread.sleep(delayMs);
+                } catch (InterruptedException e) {
+                    /* ignore */
+                }
+            }
+            connection = getConnection();
+            prepareStatements();
+        }
+    }
+
+    /**
+     * Checks if the required schema objects exist and creates them if they
+     * don't exist yet.
+     *
+     * @throws Exception if an error occurs
+     */
+    private void checkSchema() throws Exception {
+        if (!tableExists(connection.getMetaData(), schemaObjectPrefix + DEFAULT_JOURNAL_TABLE)) { // read ddl from resources
+            InputStream in = DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
+            if (in == null) {
+                String msg = "No database-specific DDL found: '" + databaseType + ".ddl"
+                    + "', falling back to '" + DEFAULT_DDL_NAME + "'.";
+                log.info(msg);
+                in = DatabaseJournal.class.getResourceAsStream(DEFAULT_DDL_NAME);
+                if (in == null) {
+                    msg = "Unable to load '" + DEFAULT_DDL_NAME + "'.";
+                    throw new JournalException(msg);
+                }
+            }
+            BufferedReader reader = new BufferedReader(new InputStreamReader(in));
+            Statement stmt = connection.createStatement();
+            try {
+                String sql = reader.readLine();
+                while (sql != null) {
+                    // Skip comments and empty lines
+                    if (!sql.startsWith("#") && sql.length() > 0) {
+                        // replace prefix variable
+                        sql = createSchemaSQL(sql);
+                        // execute sql stmt
+                        stmt.executeUpdate(sql);
+                    }
+                    // read next sql stmt
+                    sql = reader.readLine();
+                }
+            } finally {
+                close(in);
+                close(stmt);
+            }
+        }
+    }
+
+    /**
      * Checks if the local revision schema objects exist and creates them if they
      * don't exist yet.
      *
      * @throws Exception if an error occurs
      */
     private void checkLocalRevisionSchema() throws Exception {
-        InputStream localRevisionDDLStream = null;
-        InputStream in = DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
-        try {
+        if (!tableExists(connection.getMetaData(), schemaObjectPrefix + LOCAL_REVISIONS_TABLE)) {
+            log.info("Creating " + schemaObjectPrefix + LOCAL_REVISIONS_TABLE + " table");
+            // read ddl from resources
+            InputStream in = DatabaseJournal.class.getResourceAsStream(databaseType + ".ddl");
+            if (in == null) {
+                String msg = "No database-specific DDL found: '" + databaseType + ".ddl" +
+                        "', falling back to '" + DEFAULT_DDL_NAME + "'.";
+                log.info(msg);
+                in = DatabaseJournal.class.getResourceAsStream(DEFAULT_DDL_NAME);
+                if (in == null) {
+                    msg = "Unable to load '" + DEFAULT_DDL_NAME + "'.";
+                    throw new JournalException(msg);
+                }
+            }
             BufferedReader reader = new BufferedReader(new InputStreamReader(in));
-            String sql = reader.readLine();
-            while (sql != null) {
-                // Skip comments and empty lines, and select only the statement to create the LOCAL_REVISIONS
-                // table.
-                if (!sql.startsWith("#") && sql.length() > 0 && sql.indexOf(LOCAL_REVISIONS_TABLE) != -1) {
-                    localRevisionDDLStream = new ByteArrayInputStream(sql.getBytes());
-                    break;
+            Statement stmt = connection.createStatement();
+            try {
+                String sql = reader.readLine();
+                while (sql != null) {
+                    // Skip comments and empty lines, and select only the statement
+                    // to create the LOCAL_REVISIONS table.
+                    if (!sql.startsWith("#") && sql.length() > 0
+                            && sql.indexOf(LOCAL_REVISIONS_TABLE) != -1) {
+                        // replace prefix variable
+                        sql = createSchemaSQL(sql);
+                        // execute sql stmt
+                        stmt.executeUpdate(sql);
+                    }
+                    // read next sql stmt
+                    sql = reader.readLine();
                 }
-                // read next sql stmt
-                sql = reader.readLine();
+            } finally {
+                close(in);
+                close(stmt);
             }
+        }
+    }
+
+    /**
+     * Checks whether the required table(s) exist in the schema. May be
+     * overridden by subclasses to allow different table names.
+     *
+     * @param metaData database meta data
+     * @return <code>true</code> if the schema exists
+     * @throws SQLException if an SQL error occurs
+     */
+    protected boolean tableExists(DatabaseMetaData metaData, String tableName)
+        throws SQLException {
+
+        if (metaData.storesLowerCaseIdentifiers()) {
+            tableName = tableName.toLowerCase();
+        } else if (metaData.storesUpperCaseIdentifiers()) {
+            tableName = tableName.toUpperCase();
+        }
+
+        ResultSet rs = metaData.getTables(null, null, tableName, null);
+
+        try {
+            return rs.next();
         } finally {
-            IOUtils.closeQuietly(in);
+            rs.close();
         }
-        // Run the schema check for the single table
-        new CheckSchemaOperation(conHelper, localRevisionDDLStream, schemaObjectPrefix
-                + LOCAL_REVISIONS_TABLE).addVariableReplacement(
-            CheckSchemaOperation.SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix).run();
     }
 
     /**
+     * Creates an SQL statement for schema creation by variable substitution.
+     *
+     * @param sql a SQL string which may contain variables to substitute
+     * @return a valid SQL string
+     */
+    protected String createSchemaSQL(String sql) {
+        return Text.replace(sql, SCHEMA_OBJECT_PREFIX_VARIABLE, schemaObjectPrefix);
+    }
+
+    /**
      * Builds the SQL statements. May be overridden by subclasses to allow
      * different table and/or column names.
      */
@@ -587,6 +938,23 @@
     }
 
     /**
+     * Prepares the SQL statements.
+     *
+     * @throws SQLException if an error occurs
+     */
+    private void prepareStatements() throws SQLException {
+        selectRevisionsStmt = connection.prepareStatement(selectRevisionsStmtSQL);
+        updateGlobalStmt = connection.prepareStatement(updateGlobalStmtSQL);
+        selectGlobalStmt = connection.prepareStatement(selectGlobalStmtSQL);
+        insertRevisionStmt = connection.prepareStatement(insertRevisionStmtSQL);
+        selectMinLocalRevisionStmt = connection.prepareStatement(selectMinLocalRevisionStmtSQL);
+        cleanRevisionStmt = connection.prepareStatement(cleanRevisionStmtSQL);
+        getLocalRevisionStmt = connection.prepareStatement(getLocalRevisionStmtSQL);
+        insertLocalRevisionStmt = connection.prepareStatement(insertLocalRevisionStmtSQL);
+        updateLocalRevisionStmt = connection.prepareStatement(updateLocalRevisionStmtSQL);
+    }
+
+    /**
      * Bean getters
      */
     public String getDriver() {
@@ -599,7 +967,7 @@
 
     /**
      * Get the database type.
-     * 
+     *
      * @return the database type
      */
     public String getDatabaseType() {
@@ -610,7 +978,7 @@
      * Get the database type.
      * @deprecated
      * This method is deprecated; {@link #getDatabaseType} should be used instead.
-     * 
+     *
      * @return the database type
      */
     public String getSchema() {
@@ -629,6 +997,10 @@
         return password;
     }
 
+    public long getReconnectDelayMs() {
+        return reconnectDelayMs;
+    }
+
     public boolean getJanitorEnabled() {
         return janitorEnabled;
     }
@@ -654,7 +1026,7 @@
 
     /**
      * Set the database type.
-     * 
+     *
      * @param databaseType the database type
      */
     public void setDatabaseType(String databaseType) {
@@ -664,8 +1036,8 @@
     /**
      * Set the database type.
     * @deprecated
-    * This method is deprecated; {@link #getDatabaseType} should be used instead.
-     * 
+    * This method is deprecated; {@link #setDatabaseType} should be used instead.
+     *
      * @param databaseType the database type
      */
     public void setSchema(String databaseType) {
@@ -684,6 +1056,10 @@
         this.password = password;
     }
 
+    public void setReconnectDelayMs(long reconnectDelayMs) {
+        this.reconnectDelayMs = reconnectDelayMs;
+    }
+
     public void setJanitorEnabled(boolean enabled) {
         this.janitorEnabled = enabled;
     }
@@ -703,14 +1079,6 @@
         janitorNextRun.set(Calendar.MILLISECOND, 0);
     }
 
-    public String getDataSourceName() {
-        return dataSourceName;
-    }
-
-    public void setDataSourceName(String dataSourceName) {
-        this.dataSourceName = dataSourceName;
-    }
-
     /**
      * @return whether the schema check is enabled
      */
@@ -738,9 +1106,9 @@
         private long localRevision;
 
         /**
-         * Indicates whether the init method has been called. 
+         * Indicates whether the init method has been called.
          */
-        private boolean initialized = false;
+        private boolean initialized;
 
         /**
          * Checks whether there's a local revision value in the database for this
@@ -751,18 +1119,29 @@
          * @throws JournalException on error
          */
         protected synchronized long init(long revision) throws JournalException {
-            ResultSet rs = null;
             try {
+                // Check whether the connection is available
+                checkConnection();
+
                 // Check whether there is an entry in the database.
-                rs = conHelper.exec(getLocalRevisionStmtSQL, new Object[]{getId()}, false, 0);
+                getLocalRevisionStmt.clearParameters();
+                getLocalRevisionStmt.clearWarnings();
+                getLocalRevisionStmt.setString(1, getId());
+                getLocalRevisionStmt.execute();
+                ResultSet rs = getLocalRevisionStmt.getResultSet();
                 boolean exists = rs.next();
                 if (exists) {
                     revision = rs.getLong(1);
                 }
+                rs.close();
 
                 // Insert the given revision in the database
                 if (!exists) {
-                    conHelper.exec(insertLocalRevisionStmtSQL, revision, getId());
+                    insertLocalRevisionStmt.clearParameters();
+                    insertLocalRevisionStmt.clearWarnings();
+                    insertLocalRevisionStmt.setLong(1, revision);
+                    insertLocalRevisionStmt.setString(2, getId());
+                    insertLocalRevisionStmt.execute();
                 }
 
                 // Set the cached local revision and return
@@ -772,9 +1151,8 @@
 
             } catch (SQLException e) {
                 log.warn("Failed to initialize local revision.", e);
+                DatabaseJournal.this.close(true);
                 throw new JournalException("Failed to initialize local revision", e);
-            } finally {
-                DbUtility.close(rs);
             }
         }
 
@@ -799,18 +1177,25 @@
 
             // Update the cached value and the table with local revisions.
             try {
-                conHelper.exec(updateLocalRevisionStmtSQL, localRevision, getId());
+                // Check whether the connection is available
+                checkConnection();
+                updateLocalRevisionStmt.clearParameters();
+                updateLocalRevisionStmt.clearWarnings();
+                updateLocalRevisionStmt.setLong(1, localRevision);
+                updateLocalRevisionStmt.setString(2, getId());
+                updateLocalRevisionStmt.execute();
                 this.localRevision = localRevision;
             } catch (SQLException e) {
                 log.warn("Failed to update local revision.", e);
-                throw new JournalException("Failed to update local revision.", e);
+                DatabaseJournal.this.close(true);
             }
         }
-        
+
         /**
          * {@inheritDoc}
          */
-        public void close() {
+        public synchronized void close() {
+            // Do nothing: The statements are closed in DatabaseJournal.close()
         }
     }
 
@@ -840,30 +1225,40 @@
             }
             log.info("Interrupted: stopping clean-up task.");
         }
-        
+
         /**
          * Cleans old revisions from the clustering table.
          */
         protected void cleanUpOldRevisions() {
-            ResultSet rs = null;
             try {
                 long minRevision = 0;
-                rs = conHelper.exec(selectMinLocalRevisionStmtSQL, null, false, 0);
+
+                // Check whether the connection is available
+                checkConnection();
+
+                // Find the minimal local revision
+                selectMinLocalRevisionStmt.clearParameters();
+                selectMinLocalRevisionStmt.clearWarnings();
+                selectMinLocalRevisionStmt.execute();
+                ResultSet rs = selectMinLocalRevisionStmt.getResultSet();
                 boolean cleanUp = rs.next();
                 if (cleanUp) {
                     minRevision = rs.getLong(1);
                 }
+                rs.close();
 
                 // Clean up if necessary:
                 if (cleanUp) {
-                    conHelper.exec(cleanRevisionStmtSQL, minRevision);
+                    cleanRevisionStmt.clearParameters();
+                    cleanRevisionStmt.clearWarnings();
+                    cleanRevisionStmt.setLong(1, minRevision);
+                    cleanRevisionStmt.execute();
                     log.info("Cleaned old revisions up to revision " + minRevision + ".");
                 }
 
             } catch (Exception e) {
                 log.warn("Failed to clean up old revisions.", e);
-            } finally {
-                DbUtility.close(rs);
+                close(true);
             }
         }
     }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/MSSqlDatabaseJournal.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/MSSqlDatabaseJournal.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/MSSqlDatabaseJournal.java	(.../trunk)	(revision 827970)
@@ -16,7 +16,7 @@
  */
 package org.apache.jackrabbit.core.journal;
 
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
+import org.apache.jackrabbit.util.Text;
 
 /**
  * It has the following property in addition to those of the DatabaseJournal:
@@ -39,15 +39,6 @@
     }
 
     /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
-    }
-
-    /**
      * Returns the configured MS SQL table space.
      * @return the configured MS SQL table space.
      */
@@ -66,4 +57,13 @@
             this.tableSpace = "";
         }
     }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected String createSchemaSQL(String sql) {
+        return Text.replace(
+                super.createSchemaSQL(sql), "${tableSpace}", tableSpace);
+    }
+
 }
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/OracleDatabaseJournal.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/OracleDatabaseJournal.java	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/journal/OracleDatabaseJournal.java	(.../trunk)	(revision 827970)
@@ -16,11 +16,11 @@
  */
 package org.apache.jackrabbit.core.journal;
 
-import javax.sql.DataSource;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.SQLException;
 
-import org.apache.jackrabbit.core.util.db.CheckSchemaOperation;
-import org.apache.jackrabbit.core.util.db.ConnectionHelper;
-import org.apache.jackrabbit.core.util.db.OracleConnectionHelper;
+import org.apache.jackrabbit.util.Text;
 
 /**
  * It has the following property in addition to those of the DatabaseJournal:
@@ -35,28 +35,9 @@
         "${tableSpace}";
 
     /** the Oracle table space to use */
-    protected String tableSpace = "";
+    protected String tableSpace;
 
     /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception {
-        OracleConnectionHelper helper = new OracleConnectionHelper(dataSrc, false);
-        helper.init();
-        return helper;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected CheckSchemaOperation createCheckSchemaOperation() {
-        return super.createCheckSchemaOperation().addVariableReplacement(
-            CheckSchemaOperation.TABLE_SPACE_VARIABLE, tableSpace);
-    }
-
-    /**
      * Returns the configured Oracle table space.
      * @return the configured Oracle table space.
      */
@@ -69,10 +50,46 @@
      * @param tableSpace the Oracle table space.
      */
     public void setTableSpace(String tableSpace) {
-        if (tableSpace != null && tableSpace.trim().length() > 0) {
-            this.tableSpace = "tablespace " + tableSpace.trim();
+        if (tableSpace != null) {
+            this.tableSpace = tableSpace.trim();
         } else {
-            this.tableSpace = "";
+            this.tableSpace = null;
         }
     }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected String createSchemaSQL(String sql) {
+        // replace the schemaObjectPrefix
+        sql = super.createSchemaSQL(sql);
+        // set the tablespace if it is defined
+        String tspace;
+        if (tableSpace == null || "".equals(tableSpace)) {
+            tspace = "";
+        } else {
+            tspace = "tablespace " + tableSpace;
+        }
+        return Text.replace(sql, TABLE_SPACE_VARIABLE, tspace).trim();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected boolean tableExists(DatabaseMetaData metaData, String tableName) throws SQLException {
+        if (metaData.storesLowerCaseIdentifiers()) {
+            tableName = tableName.toLowerCase();
+        } else if (metaData.storesUpperCaseIdentifiers()) {
+            tableName = tableName.toUpperCase();
+        }
+
+        String userName = metaData.getUserName();
+        ResultSet rs = metaData.getTables(null, userName, tableName, null);
+
+        try {
+            return rs.next();
+        } finally {
+            rs.close();
+        }
+    }
 }
Index: jackrabbit-core/pom.xml
===================================================================
--- jackrabbit-core/pom.xml	(.../sandbox/JCR-1456)	(revision 827970)
+++ jackrabbit-core/pom.xml	(.../trunk)	(revision 827970)
@@ -166,11 +166,6 @@
       <artifactId>commons-io</artifactId>
     </dependency>
     <dependency>
-      <groupId>commons-dbcp</groupId>
-      <artifactId>commons-dbcp</artifactId>
-      <version>1.2.2</version>
-    </dependency>
-    <dependency>
       <groupId>javax.jcr</groupId>
       <artifactId>jcr</artifactId>
     </dependency>
@@ -246,206 +241,4 @@
     </dependency>
   </dependencies>
 
-  <!--
-    These profiles can be used to run the (integration) tests against different DB backends.
-    For instance, if you want to run the integration tests against MySQL backend, do:
-    
-      mvn clean integration-test -Pmysql,use-descriptor-overlay
-    
-    Note: the ${config.db.name} database is dropped and re-created in the clean phase. 
-  -->
-  <profiles>
-    <profile>
-      <id>mysql</id>
-      <properties>
-        <config.db.name>jackrabbit</config.db.name>
-        <config.db.fsclass>org.apache.jackrabbit.core.fs.db.DbFileSystem</config.db.fsclass>
-        <config.db.dsclass>org.apache.jackrabbit.core.data.db.DbDataStore</config.db.dsclass>
-        <config.db.pmclass>org.apache.jackrabbit.core.persistence.bundle.MySqlPersistenceManager</config.db.pmclass>
-        <config.db.journalclass>org.apache.jackrabbit.core.journal.DatabaseJournal</config.db.journalclass>
-        <config.db.schema>mysql</config.db.schema>
-        <config.db.validation.query>select 1</config.db.validation.query>
-        <config.db.user>user</config.db.user>
-        <config.db.pwd>pwd</config.db.pwd>
-        <config.db.driver>com.mysql.jdbc.Driver</config.db.driver>
-        <config.db.url>jdbc:mysql://localhost:3306/${config.db.name}?autoReconnect=true</config.db.url>
-        <config.db.metaurl>jdbc:mysql://localhost:3306/mysql?autoReconnect=true</config.db.metaurl>
-        <config.db.dropcommand>drop database ${config.db.name}</config.db.dropcommand>
-        <config.db.createcommand>create database ${config.db.name}</config.db.createcommand>
-      </properties>
-    </profile>
-    <profile>
-      <id>mssql</id>
-      <properties>
-        <config.db.name>jackrabbit</config.db.name>
-        <config.db.fsclass>org.apache.jackrabbit.core.fs.db.MSSqlFileSystem</config.db.fsclass>
-        <config.db.dsclass>org.apache.jackrabbit.core.data.db.DbDataStore</config.db.dsclass>
-        <config.db.pmclass>org.apache.jackrabbit.core.persistence.bundle.MSSqlPersistenceManager</config.db.pmclass>
-        <config.db.journalclass>org.apache.jackrabbit.core.journal.MSSqlDatabaseJournal</config.db.journalclass>
-        <config.db.schema>mssql</config.db.schema>
-        <config.db.validation.query>select 1</config.db.validation.query>
-        <config.db.user>user</config.db.user>
-        <config.db.pwd>pwd</config.db.pwd>
-        <config.db.driver>net.sourceforge.jtds.jdbc.Driver</config.db.driver>
-        <config.db.url>jdbc:jtds:sqlserver://localhost:2433/${config.db.name}</config.db.url>
-        <config.db.metaurl>jdbc:jtds:sqlserver://localhost:2433/master</config.db.metaurl>
-        <config.db.dropcommand>drop database ${config.db.name}</config.db.dropcommand>
-        <config.db.createcommand>create database ${config.db.name}</config.db.createcommand>
-      </properties>
-    </profile>
-    <profile>
-      <id>oracle</id>
-      <properties>
-        <config.db.name>unused</config.db.name>
-        <config.db.fsclass>org.apache.jackrabbit.core.fs.db.OracleFileSystem</config.db.fsclass>
-        <config.db.dsclass>org.apache.jackrabbit.core.data.db.DbDataStore</config.db.dsclass>
-        <config.db.pmclass>org.apache.jackrabbit.core.persistence.bundle.OraclePersistenceManager</config.db.pmclass>
-        <config.db.journalclass>org.apache.jackrabbit.core.journal.OracleDatabaseJournal</config.db.journalclass>
-        <config.db.schema>oracle</config.db.schema>
-        <config.db.validation.query>select 'validationQuery' from dual</config.db.validation.query>
-        <config.db.user>user</config.db.user>
-        <config.db.pwd>password</config.db.pwd>
-        <config.db.driver>oracle.jdbc.driver.OracleDriver</config.db.driver>
-        <config.db.url>jdbc:oracle:thin:@localhost:1521:xe</config.db.url>
-        <config.db.metaurl>unused</config.db.metaurl>
-        <config.db.dropcommand>unused</config.db.dropcommand>
-        <config.db.createcommand>unused</config.db.createcommand>
-      </properties>
-    </profile>
-    <profile>
-      <id>use-descriptor-overlay</id>
-      <build>
-      <plugins>
-        <!-- Drop/create the test database on clean -->        
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>sql-maven-plugin</artifactId>
-          <dependencies>
-            <dependency>
-              <groupId>mysql</groupId>
-              <artifactId>mysql-connector-java</artifactId>
-              <version>5.1.6</version>
-              <type>jar</type>
-              <scope>provided</scope>
-            </dependency>
-            <dependency>
-              <groupId>net.sourceforge.jtds</groupId>
-              <artifactId>jtds</artifactId>
-              <version>1.2.2</version>
-              <scope>provided</scope>
-            </dependency>
-          </dependencies>
-          <configuration>
-            <driver>${config.db.driver}</driver>
-            <url>${config.db.metaurl}</url>
-            <username>${config.db.user}</username>
-            <password>${config.db.pwd}</password>
-            <settingsKey>sensibleKey</settingsKey>
-          </configuration>
-          <executions>
-            <execution>
-              <id>drop-db</id>
-              <phase>clean</phase>
-              <goals>
-                <goal>execute</goal>
-              </goals>
-              <configuration>
-                <autocommit>true</autocommit>
-                <sqlCommand>${config.db.dropcommand}</sqlCommand>
-                <onError>continue</onError>
-              </configuration>
-            </execution>
-            <execution>
-              <id>create-db</id>
-              <phase>clean</phase>
-              <goals>
-                <goal>execute</goal>
-              </goals>
-              <configuration>
-                <autocommit>true</autocommit>
-                <sqlCommand>${config.db.createcommand}</sqlCommand>
-              </configuration>
-            </execution>
-          </executions>
-        </plugin>
-        <plugin>
-          <artifactId>maven-antrun-plugin</artifactId>
-          <executions>
-            <execution>
-              <id>overlay-repository-descriptors</id>
-              <phase>process-test-resources</phase>
-              <configuration>
-                <tasks>
-                  <copy todir="${project.build.directory}/repository" overwrite="true">
-                    <fileset dir="${basedir}/src/test/repository-descriptor-overlay" />
-                    <filterset>
-                      <filter token="repo.id" value="A" />
-                      <filter token="config.db.schema" value="${config.db.schema}" />
-                      <filter token="config.db.validation.query" value="${config.db.validation.query}" />
-                      <filter token="config.db.driver" value="${config.db.driver}" />
-                      <filter token="config.db.url" value="${config.db.url}" />
-                      <filter token="config.db.user" value="${config.db.user}" />
-                      <filter token="config.db.pwd" value="${config.db.pwd}" />
-                      <filter token="config.db.fsclass" value="${config.db.fsclass}" />
-                      <filter token="config.db.dsclass" value="${config.db.dsclass}" />
-                      <filter token="config.db.pmclass" value="${config.db.pmclass}" />
-                      <filter token="config.db.journalclass" value="${config.db.journalclass}" />
-                    </filterset>
-                  </copy>
-                  <copy todir="${project.build.directory}/repository-2" overwrite="true">
-                    <fileset dir="${basedir}/src/test/repository-descriptor-overlay" />
-                    <filterset>
-                      <filter token="repo.id" value="B" />
-                      <filter token="config.db.schema" value="${config.db.schema}" />
-                      <filter token="config.db.validation.query" value="${config.db.validation.query}" />
-                      <filter token="config.db.driver" value="${config.db.driver}" />
-                      <filter token="config.db.url" value="${config.db.url}" />
-                      <filter token="config.db.user" value="${config.db.user}" />
-                      <filter token="config.db.pwd" value="${config.db.pwd}" />
-                      <filter token="config.db.fsclass" value="${config.db.fsclass}" />
-                      <filter token="config.db.dsclass" value="${config.db.dsclass}" />
-                      <filter token="config.db.pmclass" value="${config.db.pmclass}" />
-                      <filter token="config.db.journalclass" value="${config.db.journalclass}" />
-                    </filterset>
-                  </copy>
-                </tasks>
-              </configuration>
-              <goals>
-                <goal>run</goal>
-              </goals>
-            </execution>
-          </executions>
-          <dependencies>
-            <dependency>
-              <groupId>ant</groupId>
-              <artifactId>ant-optional</artifactId>
-              <version>1.5.3-1</version>
-            </dependency>
-          </dependencies>
-        </plugin>
-      </plugins>
-      </build>
-      <dependencies>
-        <dependency>
-          <groupId>mysql</groupId>
-          <artifactId>mysql-connector-java</artifactId>
-          <version>5.1.6</version>
-          <type>jar</type>
-          <scope>test</scope>
-        </dependency>
-        <dependency>
-          <groupId>net.sourceforge.jtds</groupId>
-          <artifactId>jtds</artifactId>
-          <version>1.2.2</version>
-          <scope>test</scope>
-        </dependency>
-        <dependency>
-          <groupId>com.oracle</groupId>
-          <artifactId>ojdbc14</artifactId>
-          <version>10.2.0.3.0</version>
-          <scope>test</scope>
-        </dependency>
-      </dependencies>
-    </profile>
-  </profiles>
 </project>

Property changes on: .
___________________________________________________________________
Modified: svn:mergeinfo
   Reverse-merged /incubator/jackrabbit/trunk:r45992-387421
   Reverse-merged /jackrabbit/trunk:r387422-827959

