Index: jackrabbit-aws-ext/pom.xml =================================================================== --- jackrabbit-aws-ext/pom.xml (revision 1564627) +++ jackrabbit-aws-ext/pom.xml (working copy) @@ -27,6 +27,7 @@ jackrabbit-aws-ext Jackrabbit AWS Extension Jackrabbit extenstion to Amazon Webservices + bundle @@ -48,7 +49,7 @@ org.apache.jackrabbit - jackrabbit-core + jackrabbit-data ${project.version} @@ -85,6 +86,20 @@ + + org.apache.felix + maven-bundle-plugin + true + + + org.apache.rat + apache-rat-plugin + + + .checkstyle + + + Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java =================================================================== --- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java (revision 1564627) +++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java (working copy) @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.aws.ext.ds; - -import java.io.File; -import java.io.InputStream; -import java.util.Iterator; -import java.util.List; - -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; - -/** - * The interface defines the backend which can be plugged into - * {@link CachingDataStore}. - */ -public interface Backend { - - /** - * This method initialize backend with the configuration. - * - * @param store {@link CachingDataStore} - * @param homeDir path of repository home dir. - * @param config path of config property file. - * @throws DataStoreException - */ - void init(CachingDataStore store, String homeDir, String config) - throws DataStoreException; - - /** - * Return inputstream of record identified by identifier. - * - * @param identifier identifier of record. - * @return inputstream of the record. - * @throws DataStoreException if record not found or any error. - */ - InputStream read(DataIdentifier identifier) throws DataStoreException; - - /** - * Return length of record identified by identifier. - * - * @param identifier identifier of record. - * @return length of the record. - * @throws DataStoreException if record not found or any error. - */ - long getLength(DataIdentifier identifier) throws DataStoreException; - - /** - * Return lastModified of record identified by identifier. - * - * @param identifier identifier of record. - * @return lastModified of the record. - * @throws DataStoreException if record not found or any error. - */ - long getLastModified(DataIdentifier identifier) throws DataStoreException; - - /** - * Stores file to backend with identifier used as key. If key pre-exists, it - * updates the timestamp of the key. - * - * @param identifier key of the file - * @param file file that would be stored in backend. - * @throws DataStoreException for any error. 
- */ - void write(DataIdentifier identifier, File file) throws DataStoreException; - - /** - * Returns identifiers of all records that exists in backend. - * @return iterator consisting of all identifiers - * @throws DataStoreException - */ - Iterator getAllIdentifiers() throws DataStoreException; - - /** - * Update timestamp of record identified by identifier if minModifiedDate is - * greater than record's lastModified else no op. - * - * @throws DataStoreException if record not found. - */ - void touch(DataIdentifier identifier, long minModifiedDate) - throws DataStoreException; - /** - * This method check the existence of record in backend. - * @param identifier identifier to be checked. - * @return true if records exists else false. - * @throws DataStoreException - */ - boolean exists(DataIdentifier identifier) throws DataStoreException; - - /** - * Close backend and release resources like database connection if any. - * @throws DataStoreException - */ - void close() throws DataStoreException; - - /** - * Delete all records which are older than timestamp. - * @param timestamp - * @return list of identifiers which are deleted. - * @throws DataStoreException - */ - List deleteAllOlderThan(long timestamp) throws DataStoreException; - - /** - * Delete record identified by identifier. No-op if identifier not found. - * @param identifier - * @throws DataStoreException - */ - void deleteRecord(DataIdentifier identifier) throws DataStoreException; -} Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java =================================================================== --- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java (revision 1564627) +++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java (working copy) @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.jackrabbit.aws.ext.ds; - -import java.io.InputStream; - -import org.apache.jackrabbit.core.data.AbstractDataRecord; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; - -/** - * CachingDataRecord which stores reference to {@link CachingDataStore}. This - * class doesn't store any references to attributes but attributes are fetched - * on demand from {@link CachingDataStore}. 
- */ -public class CachingDataRecord extends AbstractDataRecord { - - private final CachingDataStore store; - - public CachingDataRecord(CachingDataStore store, DataIdentifier identifier) { - super(store, identifier); - this.store = store; - } - - @Override - public long getLastModified() { - try { - return store.getLastModified(getIdentifier()); - } catch (DataStoreException dse) { - return 0; - } - } - - @Override - public long getLength() throws DataStoreException { - return store.getLength(getIdentifier()); - } - - @Override - public InputStream getStream() throws DataStoreException { - return store.getStream(getIdentifier()); - } - -} Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java =================================================================== --- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java (revision 1564627) +++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java (working copy) @@ -1,585 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.aws.ext.ds; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.UnsupportedEncodingException; -import java.lang.ref.WeakReference; -import java.security.DigestOutputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.WeakHashMap; - -import javax.jcr.RepositoryException; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.aws.ext.LocalCache; -import org.apache.jackrabbit.core.data.AbstractDataStore; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A caching data store that consists of {@link LocalCache} and {@link Backend}. - * {@link Backend} is single source of truth. All methods first try to fetch - * information from {@link LocalCache}. If record is not available in - * {@link LocalCache}, then it is fetched from {@link Backend} and saved to - * {@link LocalCache} for further access. This class is designed to work without - * {@link LocalCache} and then all information is fetched from {@link Backend}. - * To disable {@link LocalCache} set {@link #setCacheSize(long)} to 0. * - * Configuration: - * - *
- * <DataStore class="org.apache.jackrabbit.aws.ext.ds.CachingDataStore">
- * 
- *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
- *     <param name="{@link #setConfig(String) config}" value="${rep.home}/backend.properties"/>
- *     <param name="{@link #setCacheSize(long) cacheSize}" value="68719476736"/>
- *     <param name="{@link #setSecret(String) secret}" value="123456"/>
- *     <param name="{@link #setCachePurgeTrigFactor(double)}" value="0.95d"/>
- *     <param name="{@link #setCachePurgeResizeFactor(double) cacheSize}" value="0.85d"/>
- *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
- * </DataStore>
- */
-public abstract class CachingDataStore extends AbstractDataStore implements
-        MultiDataStoreAware {
-
-    /**
-     * Logger instance.
-     */
-    private static final Logger LOG = LoggerFactory.getLogger(CachingDataStore.class);
-
-    /**
-     * The digest algorithm used to uniquely identify records.
-     */
-    private static final String DIGEST = "SHA-1";
-
-    private static final String DS_STORE = ".DS_Store";
-    
-    /**
-     * Name of the directory used for temporary files. Must be at least 3
-     * characters.
-     */
-    private static final String TMP = "tmp";
-    
-    /**
-     * All data identifiers that are currently in use are in this set until they
-     * are garbage collected.
-     */
-    protected Map> inUse = 
-            Collections.synchronizedMap(new WeakHashMap>());
-
-    protected Backend backend;
-
-    /**
-     * The minimum size of an object that should be stored in this data store.
-     */
-    private int minRecordLength = 16 * 1024;
-
-    private String path;
-
-    private File directory;
-
-    private File tmpDir;
-
-    private String secret;
-
-    /**
-     * The optional backend configuration.
-     */
-    private String config;
-
-    /**
-     * The minimum modified date. If a file is accessed (read or write) with a
-     * modified date older than this value, the modified date is updated to the
-     * current time.
-     */
-    private long minModifiedDate;
-
-    /**
-     * Cache purge trigger factor. Cache will undergo in auto-purge mode if
-     * cache current size is greater than cachePurgeTrigFactor * cacheSize
-     */
-    private double cachePurgeTrigFactor = 0.95d;
-
-    /**
-     * Cache resize factor. After auto-purge mode, cache current size would just
-     * greater than cachePurgeResizeFactor * cacheSize cacheSize
-     */
-    private double cachePurgeResizeFactor = 0.85d;
-
-    /**
-     * The number of bytes in the cache. The default value is 64 GB.
-     */
-    private long cacheSize = 64L * 1024 * 1024 * 1024;
-
-    /**
-     * The local file system cache.
-     */
-    private LocalCache cache;
-
-    abstract Backend createBackend();
-
-    abstract String getMarkerFile();
-
-    /**
-     * Initialized the data store. If the path is not set, <repository
-     * home>/repository/datastore is used. This directory is automatically
-     * created if it does not yet exist. During first initialization, it upload
-     * all files from local datastore to backed and local datastore act as a
-     * local cache.
-     */
-    @Override
-    public void init(String homeDir) throws RepositoryException {
-        if (path == null) {
-            path = homeDir + "/repository/datastore";
-        }
-        directory = new File(path);
-        try {
-            mkdirs(directory);
-        } catch (IOException e) {
-            throw new DataStoreException("Could not create directory "
-                + directory.getAbsolutePath(), e);
-        }
-        tmpDir = new File(homeDir, "/repository/s3tmp");
-        try {
-            if (!mkdirs(tmpDir)) {
-                FileUtils.cleanDirectory(tmpDir);
-                LOG.info("tmp = " + tmpDir.getPath() + " cleaned");
-            }
-        } catch (IOException e) {
-            throw new DataStoreException("Could not create directory "
-                + tmpDir.getAbsolutePath(), e);
-        }
-        LOG.info("cachePurgeTrigFactor = " + cachePurgeTrigFactor
-            + ", cachePurgeResizeFactor = " + cachePurgeResizeFactor);
-        backend = createBackend();
-        backend.init(this, path, config);
-        String markerFileName = getMarkerFile();
-        if (markerFileName != null) {
-            // create marker file in homeDir to avoid deletion in cache cleanup.
-            File markerFile = new File(homeDir, markerFileName);
-            if (!markerFile.exists()) {
-                LOG.info("load files from local cache");
-                loadFilesFromCache();
-                try {
-                    markerFile.createNewFile();
-                } catch (IOException e) {
-                    throw new DataStoreException(
-                        "Could not create marker file "
-                            + markerFile.getAbsolutePath(), e);
-                }
-            } else {
-                LOG.info("marker file = " + markerFile.getAbsolutePath()
-                    + " exists");
-            }
-        }
-        cache = new LocalCache(path, tmpDir.getAbsolutePath(), cacheSize,
-            cachePurgeTrigFactor, cachePurgeResizeFactor);
-    }
-
-    /**
-     * Creates a new data record in {@link Backend}. The stream is first
-     * consumed and the contents are saved in a temporary file and the SHA-1
-     * message digest of the stream is calculated. If a record with the same
-     * SHA-1 digest (and length) is found then it is returned. Otherwise new
-     * record is created in {@link Backend} and the temporary file is moved in
-     * place to {@link LocalCache}.
-     * 
-     * @param input binary stream
-     * @return {@link CachingDataRecord}
-     * @throws DataStoreException if the record could not be created.
-     */
-    @Override
-    public DataRecord addRecord(InputStream input) throws DataStoreException {
-        File temporary = null;
-        try {
-            temporary = newTemporaryFile();
-            DataIdentifier tempId = new DataIdentifier(temporary.getName());
-            usesIdentifier(tempId);
-            // Copy the stream to the temporary file and calculate the
-            // stream length and the message digest of the stream
-            MessageDigest digest = MessageDigest.getInstance(DIGEST);
-            OutputStream output = new DigestOutputStream(new FileOutputStream(
-                temporary), digest);
-            try {
-                IOUtils.copyLarge(input, output);
-            } finally {
-                output.close();
-            }
-            DataIdentifier identifier = new DataIdentifier(
-                encodeHexString(digest.digest()));
-            synchronized (this) {
-                usesIdentifier(identifier);
-                backend.write(identifier, temporary);
-                String fileName = getFileName(identifier);
-                cache.store(fileName, temporary);
-            }
-            // this will also make sure that
-            // tempId is not garbage collected until here
-            inUse.remove(tempId);
-            return new CachingDataRecord(this, identifier);
-        } catch (NoSuchAlgorithmException e) {
-            throw new DataStoreException(DIGEST + " not available", e);
-        } catch (IOException e) {
-            throw new DataStoreException("Could not add record", e);
-        } finally {
-            if (temporary != null) {
-                // try to delete - but it's not a big deal if we can't
-                temporary.delete();
-            }
-        }
-    }
-
-    /**
-     * Get a data record for the given identifier or null it data record doesn't
-     * exist in {@link Backend}
-     * 
-     * @param identifier identifier of record.
-     * @return the {@link CachingDataRecord} or null.
-     */
-    @Override
-    public DataRecord getRecordIfStored(DataIdentifier identifier)
-            throws DataStoreException {
-        synchronized (this) {
-            usesIdentifier(identifier);
-            if (!backend.exists(identifier)) {
-                return null;
-            }
-            backend.touch(identifier, minModifiedDate);
-            return new CachingDataRecord(this, identifier);
-        }
-    }
-
-    @Override
-    public void updateModifiedDateOnAccess(long before) {
-        LOG.info("minModifiedDate set to: " + before);
-        minModifiedDate = before;
-    }
-    /**
-     * Retrieves all identifiers from {@link Backend}.
-     */
-    @Override
-    public Iterator getAllIdentifiers()
-            throws DataStoreException {
-        return backend.getAllIdentifiers();
-    }
-
-    /**
-     * This method deletes record from {@link Backend} and then from
-     * {@link LocalCache}
-     */
-    @Override
-    public void deleteRecord(DataIdentifier identifier)
-            throws DataStoreException {
-        String fileName = getFileName(identifier);
-        synchronized (this) {
-            backend.deleteRecord(identifier);
-            cache.delete(fileName);
-        }
-    }
-
-    @Override
-    public synchronized int deleteAllOlderThan(long min)
-            throws DataStoreException {
-        List diList = backend.deleteAllOlderThan(min);
-        // remove entries from local cache
-        for (DataIdentifier identifier : diList) {
-            cache.delete(getFileName(identifier));
-        }
-        return diList.size();
-    }
-
-    /**
-     * Get stream of record from {@link LocalCache}. If record is not available
-     * in {@link LocalCache}, this method fetches record from {@link Backend}
-     * and stores it to {@link LocalCache}. Stream is then returned from cached
-     * record.
-     */
-    InputStream getStream(DataIdentifier identifier) throws DataStoreException {
-        InputStream in = null;
-        try {
-            String fileName = getFileName(identifier);
-            InputStream cached = cache.getIfStored(fileName);
-            if (cached != null) {
-                return cached;
-            }
-            in = backend.read(identifier);
-            return cache.store(fileName, in);
-        } catch (IOException e) {
-            throw new DataStoreException("IO Exception: " + identifier, e);
-        } finally {
-            IOUtils.closeQuietly(in);
-        }
-    }
-
-    /**
-     * Return lastModified of record from {@link Backend} assuming
-     * {@link Backend} as a single source of truth.
-     */
-    long getLastModified(DataIdentifier identifier) throws DataStoreException {
-        LOG.info("accessed lastModified");
-        return backend.getLastModified(identifier);
-    }
-
-    /**
-     * Return the length of record from {@link LocalCache} if available,
-     * otherwise retrieve it from {@link Backend}.
-     */
-    long getLength(DataIdentifier identifier) throws DataStoreException {
-        String fileName = getFileName(identifier);
-        Long length = cache.getFileLength(fileName);
-        if (length != null) {
-            return length.longValue();
-        }
-        return backend.getLength(identifier);
-    }
-
-    @Override
-    protected byte[] getOrCreateReferenceKey() throws DataStoreException {
-        try {
-            return secret.getBytes("UTF-8");
-        } catch (UnsupportedEncodingException e) {
-            throw new DataStoreException(e);
-        }
-    }
-
-    /**
-     * Returns a unique temporary file to be used for creating a new data
-     * record.
-     */
-    private File newTemporaryFile() throws IOException {
-        return File.createTempFile(TMP, null, tmpDir);
-    }
-
-    /**
-     * Load files from {@link LocalCache} to {@link Backend}.
-     */
-    private void loadFilesFromCache() throws RepositoryException {
-        ArrayList files = new ArrayList();
-        listRecursive(files, directory);
-        long totalSize = 0;
-        for (File f : files) {
-            totalSize += f.length();
-        }
-        long currentSize = 0;
-        long time = System.currentTimeMillis();
-        for (File f : files) {
-            long now = System.currentTimeMillis();
-            if (now > time + 5000) {
-                LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
-                time = now;
-            }
-            currentSize += f.length();
-            String name = f.getName();
-            LOG.debug("upload file = " + name);
-            if (!name.startsWith(TMP) && !name.endsWith(DS_STORE)
-                && f.length() > 0) {
-                loadFileToBackEnd(f);
-            }
-        }
-        LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
-    }
-
-    /**
-     * Traverse recursively and populate list with files.
-     */
-    private void listRecursive(List list, File file) {
-        File[] files = file.listFiles();
-        if (files != null) {
-            for (File f : files) {
-                if (f.isDirectory()) {
-                    listRecursive(list, f);
-                } else {
-                    list.add(f);
-                }
-            }
-        }
-    }
-    /**
-     * Upload file from {@link LocalCache} to {@link Backend}. 
-     * @param f file to uploaded.
-     * @throws DataStoreException
-     */
-    private void loadFileToBackEnd(File f) throws DataStoreException {
-        DataIdentifier identifier = new DataIdentifier(f.getName());
-        usesIdentifier(identifier);
-        backend.write(identifier, f);
-        LOG.debug(f.getName() + "uploaded.");
-
-    }
-
-    /**
-     * Derive file name from identifier.
-     */
-    private static String getFileName(DataIdentifier identifier) {
-        String name = identifier.toString();
-        name = name.substring(0, 2) + "/" + name.substring(2, 4) + "/"
-            + name.substring(4, 6) + "/" + name;
-        return name;
-    }
-
-    private void usesIdentifier(DataIdentifier identifier) {
-        inUse.put(identifier, new WeakReference(identifier));
-    }
-
-    private static boolean mkdirs(File dir) throws IOException {
-        if (dir.exists()) {
-            if (dir.isFile()) {
-                throw new IOException("Can not create a directory "
-                    + "because a file exists with the same name: "
-                    + dir.getAbsolutePath());
-            }
-            return false;
-        }
-        boolean created = dir.mkdirs();
-        if (!created) {
-            throw new IOException("Could not create directory: "
-                + dir.getAbsolutePath());
-        }
-        return created;
-    }
-
-    @Override
-    public void clearInUse() {
-        inUse.clear();
-    }
-
-    @Override
-    public void close() throws DataStoreException {
-        cache.close();
-        backend.close();
-        cache = null;
-    }
-
-    /**
-     * Setter for configuration based secret
-     * 
-     * @param secret the secret used to sign reference binaries
-     */
-    public void setSecret(String secret) {
-        this.secret = secret;
-    }
-
-    /**
-     * Set the minimum object length.
-     * 
-     * @param minRecordLength the length
-     */
-    public void setMinRecordLength(int minRecordLength) {
-        this.minRecordLength = minRecordLength;
-    }
-
-    /**
-     * Return mininum object length.
-     */
-    @Override
-    public int getMinRecordLength() {
-        return minRecordLength;
-    }
-
-    /**
-     * Return path of configuration properties.
-     * 
-     * @return path of configuration properties.
-     */
-    public String getConfig() {
-        return config;
-    }
-
-    /**
-     * Set the configuration properties path.
-     * 
-     * @param config path of configuration properties.
-     */
-    public void setConfig(String config) {
-        this.config = config;
-    }
-    /**
-     * @return  size of {@link LocalCache}. 
-     */
-    public long getCacheSize() {
-        return cacheSize;
-    }
-    /**
-     * Set size of {@link LocalCache}.
-     * @param cacheSize size of {@link LocalCache}.  
-     */
-    public void setCacheSize(long cacheSize) {
-        this.cacheSize = cacheSize;
-    }
-    /**
-     * 
-     * @return path of {@link LocalCache}.
-     */
-    public String getPath() {
-        return path;
-    }
-    /**
-     * Set path of {@link LocalCache}.
-     * @param path of {@link LocalCache}.
-     */
-    public void setPath(String path) {
-        this.path = path;
-    }
-
-    /**
-     * @return Purge trigger factor of {@link LocalCache}.
-     */
-    public double getCachePurgeTrigFactor() {
-        return cachePurgeTrigFactor;
-    }
-
-    /**
-     * Set purge trigger factor of {@link LocalCache}.
-     * @param cachePurgeTrigFactor purge trigger factor.
-     */
-    public void setCachePurgeTrigFactor(double cachePurgeTrigFactor) {
-        this.cachePurgeTrigFactor = cachePurgeTrigFactor;
-    }
-
-    /**
-     * @return Purge resize factor of {@link LocalCache}.
-     */
-    public double getCachePurgeResizeFactor() {
-        return cachePurgeResizeFactor;
-    }
-
-    /**
-     * Set purge resize factor of {@link LocalCache}.
-     * @param cachePurgeResizeFactor purge resize factor.
-     */
-    public void setCachePurgeResizeFactor(double cachePurgeResizeFactor) {
-        this.cachePurgeResizeFactor = cachePurgeResizeFactor;
-    }
-
-}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java	(working copy)
@@ -32,6 +32,8 @@
 
 import org.apache.jackrabbit.aws.ext.S3Constants;
 import org.apache.jackrabbit.aws.ext.Utils;
+import org.apache.jackrabbit.core.data.Backend;
+import org.apache.jackrabbit.core.data.CachingDataStore;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataStoreException;
 import org.slf4j.Logger;
@@ -340,7 +342,7 @@
             for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                 DataIdentifier identifier = new DataIdentifier(
                     getIdentifierName(s3ObjSumm.getKey()));
-                if (!store.inUse.containsKey(identifier)
+                if (!store.isInUse(identifier)
                     && s3ObjSumm.getLastModified().getTime() < min) {
                     LOG.info("add id :" + s3ObjSumm.getKey()
                         + " to delete lists");
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java	(working copy)
@@ -16,6 +16,9 @@
  */
 package org.apache.jackrabbit.aws.ext.ds;
 
+import org.apache.jackrabbit.core.data.Backend;
+import org.apache.jackrabbit.core.data.CachingDataStore;
+
 /**
  * An Amazon S3 data store.
  */
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java	(working copy)
@@ -1,535 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.jackrabbit.aws.ext;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import javax.jcr.RepositoryException;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.jackrabbit.core.data.LazyFileInputStream;
-import org.apache.jackrabbit.util.TransientFileFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements a LRU cache used by {@link CachingDataStore}. If cache
- * size exceeds limit, this cache goes in purge mode. In purge mode any
- * operation to cache is no-op. After purge cache size would be less than
- * cachePurgeResizeFactor * maximum size.
- */
-public class LocalCache {
-
-    /**
-     * Logger instance.
-     */
-    static final Logger LOG = LoggerFactory.getLogger(LocalCache.class);
-
-    /**
-     * The file names of the files that need to be deleted.
-     */
-    final Set<String> toBeDeleted = new HashSet<String>();
-
-    /**
-     * The filename Vs file size LRU cache.
-     */
-    LRUCache cache;
-
-    /**
-     * The directory where the files are created.
-     */
-    private final File directory;
-
-    /**
-     * The directory where tmp files are created.
-     */
-    private final File tmp;
-
-    /**
-     * The maximum size of cache in bytes.
-     */
-    private long maxSize;
-
-    /**
-     * If true cache is in purgeMode and not available. All operation would be
-     * no-op.
-     */
-    private volatile boolean purgeMode;
-
-    /**
-     * Build LRU cache of files located at 'path'. It uses lastModified property
-     * of file to build LRU cache. If cache size exceeds limit size, this cache
-     * goes in purge mode. In purge mode any operation to cache is no-op.
-     * 
-     * @param path file system path
-     * @param tmpPath temporary directory used by cache.
-     * @param maxSize maximum size of cache.
-     * @param cachePurgeTrigFactor factor which triggers cache to purge mode.
-     * That is if current size exceed (cachePurgeTrigFactor * maxSize), the
-     * cache will go in auto-purge mode.
-     * @param cachePurgeResizeFactor after cache purge size of cache will be
-     * just less (cachePurgeResizeFactor * maxSize).
-     * @throws RepositoryException
-     */
-    public LocalCache(final String path, final String tmpPath,
-            final long maxSize, final double cachePurgeTrigFactor,
-            final double cachePurgeResizeFactor) throws RepositoryException {
-        this.maxSize = maxSize;
-        directory = new File(path);
-        tmp = new File(tmpPath);
-        cache = new LRUCache(maxSize, cachePurgeTrigFactor,
-            cachePurgeResizeFactor);
-        ArrayList<File> allFiles = new ArrayList<File>();
-
-        Iterator<File> it = FileUtils.iterateFiles(directory, null, true);
-        while (it.hasNext()) {
-            File f = it.next();
-            allFiles.add(f);
-        }
-        Collections.sort(allFiles, new Comparator<File>() {
-            @Override
-            public int compare(final File o1, final File o2) {
-                long l1 = o1.lastModified(), l2 = o2.lastModified();
-                return l1 < l2 ? -1 : l1 > l2 ? 1 : 0;
-            }
-        });
-        String dataStorePath = directory.getAbsolutePath();
-        long time = System.currentTimeMillis();
-        int count = 0;
-        int deletecount = 0;
-        for (File f : allFiles) {
-            if (f.exists()) {
-                long length = f.length();
-                String name = f.getPath();
-                if (name.startsWith(dataStorePath)) {
-                    name = name.substring(dataStorePath.length());
-                }
-                // convert to java path format
-                name = name.replace("\\", "/");
-                if (name.startsWith("/") || name.startsWith("\\")) {
-                    name = name.substring(1);
-                }
-                if ((cache.currentSizeInBytes + length) < cache.maxSizeInBytes) {
-                    count++;
-                    cache.put(name, length);
-                } else {
-                    if (tryDelete(name)) {
-                        deletecount++;
-                    }
-                }
-                long now = System.currentTimeMillis();
-                if (now > time + 5000) {
-                    LOG.info("Processed {" + (count + deletecount) + "}/{"
-                        + allFiles.size() + "}");
-                    time = now;
-                }
-            }
-        }
-        LOG.info("Cached {" + count + "}/{" + allFiles.size()
-            + "} , currentSizeInBytes = " + cache.currentSizeInBytes);
-        LOG.info("Deleted {" + deletecount + "}/{" + allFiles.size()
-            + "} files .");
-    }
-
-    /**
-     * Store an item in the cache and return the input stream. If cache is in
-     * purgeMode or file doesn't exists, inputstream from a
-     * {@link TransientFileFactory#createTransientFile(String, String, File)} is
-     * returned. Otherwise inputStream from cached file is returned. This method
-     * doesn't close the incoming inputstream.
-     * 
-     * @param fileName the key of cache.
-     * @param in the inputstream.
-     * @return the (new) input stream.
-     */
-    public synchronized InputStream store(String fileName, final InputStream in)
-            throws IOException {
-        fileName = fileName.replace("\\", "/");
-        File f = getFile(fileName);
-        long length = 0;
-        if (!f.exists() || isInPurgeMode()) {
-            OutputStream out = null;
-            File transFile = null;
-            try {
-                TransientFileFactory tff = TransientFileFactory.getInstance();
-                transFile = tff.createTransientFile("s3-", "tmp", tmp);
-                out = new BufferedOutputStream(new FileOutputStream(transFile));
-                length = IOUtils.copyLarge(in, out);
-            } finally {
-                IOUtils.closeQuietly(out);
-            }
-            // rename the file to local fs cache
-            if (canAdmitFile(length)
-                && (f.getParentFile().exists() || f.getParentFile().mkdirs())
-                && transFile.renameTo(f) && f.exists()) {
-                if (transFile.exists() && transFile.delete()) {
-                    LOG.warn("tmp file = " + transFile.getAbsolutePath()
-                        + " not deleted successfully");
-                }
-                transFile = null;
-                toBeDeleted.remove(fileName);
-                if (cache.get(fileName) == null) {
-                    cache.put(fileName, f.length());
-                }
-            } else {
-                f = transFile;
-            }
-        } else {
-            // f.exists and not in purge mode
-            f.setLastModified(System.currentTimeMillis());
-            toBeDeleted.remove(fileName);
-            if (cache.get(fileName) == null) {
-                cache.put(fileName, f.length());
-            }
-        }
-        cache.tryPurge();
-        return new LazyFileInputStream(f);
-    }
-
-    /**
-     * Store an item along with file in cache. Cache size is increased by
-     * {@link File#length()} If file already exists in cache,
-     * {@link File#setLastModified(long)} is updated with current time.
-     * 
-     * @param fileName the key of cache.
-     * @param src file to be added to cache.
-     * @throws IOException
-     */
-    public synchronized void store(String fileName, final File src)
-            throws IOException {
-        fileName = fileName.replace("\\", "/");
-        File dest = getFile(fileName);
-        File parent = dest.getParentFile();
-        if (src.exists() && !dest.exists() && !src.equals(dest)
-            && canAdmitFile(src.length())
-            && (parent.exists() || parent.mkdirs()) && (src.renameTo(dest))) {
-            toBeDeleted.remove(fileName);
-            if (cache.get(fileName) == null) {
-                cache.put(fileName, dest.length());
-            }
-
-        } else if (dest.exists()) {
-            dest.setLastModified(System.currentTimeMillis());
-            toBeDeleted.remove(fileName);
-            if (cache.get(fileName) == null) {
-                cache.put(fileName, dest.length());
-            }
-        }
-        cache.tryPurge();
-    }
-
-    /**
-     * Return the inputstream from from cache, or null if not in the cache.
-     * 
-     * @param fileName name of file.
-     * @return  stream or null.
-     */
-    public InputStream getIfStored(String fileName) throws IOException {
-
-        fileName = fileName.replace("\\", "/");
-        File f = getFile(fileName);
-        synchronized (this) {
-            if (!f.exists() || isInPurgeMode()) {
-                log("purgeMode true or file doesn't exists: getIfStored returned");
-                return null;
-            }
-            f.setLastModified(System.currentTimeMillis());
-            return new LazyFileInputStream(f);
-        }
-    }
-
-    /**
-     * Delete file from cache. Size of cache is reduced by file length. The
-     * method is no-op if file doesn't exist in cache.
-     * 
-     * @param fileName file name that need to be removed from cache.
-     */
-    public synchronized void delete(String fileName) {
-        if (isInPurgeMode()) {
-            log("purgeMode true :delete returned");
-            return;
-        }
-        fileName = fileName.replace("\\", "/");
-        cache.remove(fileName);
-    }
-
-    /**
-     * Returns length of file if exists in cache else returns null.
-     * @param fileName name of the file.
-     */
-    public Long getFileLength(String fileName) {
-        fileName = fileName.replace("\\", "/");
-        File f = getFile(fileName);
-        synchronized (this) {
-            if (!f.exists() || isInPurgeMode()) {
-                log("purgeMode true or file doesn't exists: getFileLength returned");
-                return null;
-            }
-            f.setLastModified(System.currentTimeMillis());
-            return f.length();
-        }
-    }
-
-    /**
-     * Close the cache. Cache maintain set of files which it was not able to
-     * delete successfully. This method will an attempt to delete all
-     * unsuccessful delete files.
-     */
-    public void close() {
-        log("close");
-        deleteOldFiles();
-    }
-
-    /**
-     * Check if cache can admit file of given length.
-     * @param length of the file.
-     * @return true if yes else return false.
-     */
-    private synchronized boolean canAdmitFile(final long length) {
-        // order is important here
-        boolean value = !isInPurgeMode() && cache.canAdmitFile(length);
-        if (!value) {
-            log("cannot admit file of length=" + length
-                + " and currentSizeInBytes=" + cache.currentSizeInBytes);
-        }
-        return value;
-    }
-
-    /**
-     * Return true if cache is in purge mode else return false.
-     */
-    synchronized boolean isInPurgeMode() {
-        return purgeMode || maxSize == 0;
-    }
-
-    /**
-     * Set purge mode. If set to true all cache operation will be no-op. If set
-     * to false, all operations to cache are available.
-     * 
-     * @param purgeMode purge mode
-     */
-    synchronized void setPurgeMode(final boolean purgeMode) {
-        this.purgeMode = purgeMode;
-    }
-
-    File getFile(final String fileName) {
-        return new File(directory, fileName);
-    }
-
-    private void deleteOldFiles() {
-        int initialSize = toBeDeleted.size();
-        int count = 0;
-        for (String n : new ArrayList<String>(toBeDeleted)) {
-            if (tryDelete(n)) {
-                count++;
-            }
-        }
-        LOG.info("deleted [" + count + "]/[" + initialSize + "] files");
-    }
-
-    /**
-     * This method tries to delete a file. If it is not able to delete file due
-     * to any reason, it add it toBeDeleted list.
-     * 
-     * @param fileName name of the file which will be deleted.
-     * @return true if this method deletes file successfuly else return false.
-     */
-    boolean tryDelete(final String fileName) {
-        log("cache delete " + fileName);
-        File f = getFile(fileName);
-        if (f.exists() && f.delete()) {
-            log(fileName + "  deleted successfully");
-            toBeDeleted.remove(fileName);
-            while (true) {
-                f = f.getParentFile();
-                if (f.equals(directory) || f.list().length > 0) {
-                    break;
-                }
-                // delete empty parent folders (except the main directory)
-                f.delete();
-            }
-            return true;
-        } else if (f.exists()) {
-            LOG.info("not able to delete file = " + f.getAbsolutePath());
-            toBeDeleted.add(fileName);
-            return false;
-        }
-        return true;
-    }
-
-    static int maxSizeElements(final long bytes) {
-        // after a CQ installation, the average item in
-        // the data store is about 52 KB
-        int count = (int) (bytes / 65535);
-        count = Math.max(1024, count);
-        count = Math.min(64 * 1024, count);
-        return count;
-    }
-
-    static void log(final String s) {
-        LOG.debug(s);
-    }
-
-    /**
-     * A LRU based extension {@link LinkedHashMap}. The key is file name and
-     * value is length of file.
-     */
-    private class LRUCache extends LinkedHashMap<String, Long> {
-        private static final long serialVersionUID = 1L;
-
-        volatile long currentSizeInBytes;
-
-        final long maxSizeInBytes;
-
-        long cachePurgeResize;
-        
-        private long cachePurgeTrigSize;
-
-        public LRUCache(final long maxSizeInBytes,
-                final double cachePurgeTrigFactor,
-                final double cachePurgeResizeFactor) {
-            super(maxSizeElements(maxSizeInBytes), (float) 0.75, true);
-            this.maxSizeInBytes = maxSizeInBytes;
-            this.cachePurgeTrigSize = new Double(cachePurgeTrigFactor
-                * maxSizeInBytes).longValue();
-            this.cachePurgeResize = new Double(cachePurgeResizeFactor
-                * maxSizeInBytes).longValue();
-        }
-
-        /**
-         * Overridden {@link Map#remove(Object)} to delete corresponding file
-         * from file system.
-         */
-        @Override
-        public synchronized Long remove(final Object key) {
-            String fileName = (String) key;
-            fileName = fileName.replace("\\", "/");
-            Long flength = null;
-            if (tryDelete(fileName)) {
-                flength = super.remove(key);
-                if (flength != null) {
-                    log("cache entry { " + fileName + "} with size {" + flength
-                        + "} removed.");
-                    currentSizeInBytes -= flength.longValue();
-                }
-            } else if (!getFile(fileName).exists()) {
-                // second attempt. remove from cache if file doesn't exists
-                flength = super.remove(key);
-                if (flength != null) {
-                    log(" file not exists. cache entry { " + fileName
-                        + "} with size {" + flength + "} removed.");
-                    currentSizeInBytes -= flength.longValue();
-                }
-            }
-            return flength;
-        }
-
-        @Override
-        public synchronized Long put(final String key, final Long value) {
-            long flength = value.longValue();
-            currentSizeInBytes += flength;
-            return super.put(key.replace("\\", "/"), value);
-        }
-
-        /**
-         * This method tries purging of local cache. It checks if local cache
-         * has exceeded the defined limit then it triggers purge cache job in a
-         * seperate thread.
-         */
-        synchronized void tryPurge() {
-            if (currentSizeInBytes > cachePurgeTrigSize && !isInPurgeMode()) {
-                setPurgeMode(true);
-                LOG.info("currentSizeInBytes[" + cache.currentSizeInBytes
-                    + "] exceeds (cachePurgeTrigSize)["
-                    + cache.cachePurgeTrigSize + "]");
-                new Thread(new PurgeJob()).start();
-            }
-        }
-        /**
-         * This method check if cache can admit file of given length. 
-         * @param length length of file.
-         * @return true if cache size + length is less than maxSize.
-         */
-        synchronized boolean canAdmitFile(final long length) {
-            return cache.currentSizeInBytes + length < cache.maxSizeInBytes;
-        }
-    }
-
-    /**
-     * This class performs purging of local cache. It implements
-     * {@link Runnable} and should be invoked in a separate thread.
-     */
-    private class PurgeJob implements Runnable {
-        public PurgeJob() {
-            // TODO Auto-generated constructor stub
-        }
-
-        /**
-         * This method purges local cache till its size is less than
-         * cacheResizefactor * maxSize
-         */
-        @Override
-        public void run() {
-            try {
-                synchronized (cache) {
-                    LOG.info(" cache purge job started");
-                    // first try to delete toBeDeleted files
-                    int initialSize = cache.size();
-                    for (String fileName : new ArrayList<String>(toBeDeleted)) {
-                        cache.remove(fileName);
-                    }
-                    Iterator<Map.Entry<String, Long>> itr = cache.entrySet().iterator();
-                    while (itr.hasNext()) {
-                        Map.Entry<String, Long> entry = itr.next();
-                        if (entry.getKey() != null) {
-                            if (cache.currentSizeInBytes > cache.cachePurgeResize) {
-                                itr.remove();
-
-                            } else {
-                                break;
-                            }
-                        }
-
-                    }
-                    LOG.info(" cache purge job completed: cleaned ["
-                        + (initialSize - cache.size())
-                        + "] files and currentSizeInBytes = [ "
-                        + cache.currentSizeInBytes + "]");
-                }
-            } catch (Exception e) {
-                LOG.error("error in purge jobs:", e);
-            } finally {
-                setPurgeMode(false);
-            }
-        }
-    }
-}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java	(working copy)
@@ -29,6 +29,8 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.jackrabbit.core.data.Backend;
+import org.apache.jackrabbit.core.data.CachingDataStore;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataStoreException;
 
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java	(working copy)
@@ -16,6 +16,9 @@
  */
 package org.apache.jackrabbit.aws.ext.ds;
 
+import org.apache.jackrabbit.core.data.Backend;
+import org.apache.jackrabbit.core.data.CachingDataStore;
+
 /**
  * A caching data store that uses the in-memory backend.
  */
@@ -22,13 +25,12 @@
 public class InMemoryDataStore extends CachingDataStore {
 
     @Override
-    Backend createBackend() {
+    protected Backend createBackend() {
         return new InMemoryBackend();
     }
 
     @Override
-    String getMarkerFile() {
+    protected String getMarkerFile() {
         return "mem.init.done";
     }
-
 }
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java	(working copy)
@@ -32,6 +32,7 @@
 import junit.framework.TestCase;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.core.data.CachingDataStore;
 import org.apache.jackrabbit.core.data.DataIdentifier;
 import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.core.data.DataStore;
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java	(working copy)
@@ -16,6 +16,8 @@
  */
 package org.apache.jackrabbit.aws.ext.ds;
 
+import org.apache.jackrabbit.core.data.CachingDataStore;
+
 /**
  * Test {@link CachingDataStore} with InMemoryBackend and local cache on.
  */
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java	(working copy)
@@ -16,6 +16,8 @@
  */
 package org.apache.jackrabbit.aws.ext.ds;
 
+import org.apache.jackrabbit.core.data.CachingDataStore;
+
 /**
  * Test {@link CachingDataStore} with InMemoryBackend and local cache off.
  */
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java	(working copy)
@@ -20,6 +20,7 @@
 import java.util.Properties;
 
 import org.apache.jackrabbit.aws.ext.Utils;
+import org.apache.jackrabbit.core.data.CachingDataStore;
 
 /**
  * Test {@link CachingDataStore} with S3Backend and local cache on. It requires
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java	(working copy)
@@ -16,6 +16,8 @@
  */
 package org.apache.jackrabbit.aws.ext.ds;
 
+import org.apache.jackrabbit.core.data.CachingDataStore;
+
 /**
  * Test {@link CachingDataStore} with S3Backend and local cache Off. It requires
  * to pass aws config file via system property. For e.g.
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java	(revision 1564627)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java	(working copy)
@@ -27,6 +27,7 @@
 import junit.framework.TestCase;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.core.data.LocalCache;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
Index: jackrabbit-core/pom.xml
===================================================================
--- jackrabbit-core/pom.xml	(revision 1564627)
+++ jackrabbit-core/pom.xml	(working copy)
@@ -247,6 +247,11 @@
     
     
       org.apache.jackrabbit
+      jackrabbit-data
+      ${project.version}
+        
+    
+      org.apache.jackrabbit
       jackrabbit-spi-commons
       ${project.version}
     
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java	(revision 1564627)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java	(working copy)
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.jackrabbit.core.config;
-
-import javax.jcr.RepositoryException;
-
-/**
- * Exception class used for configuration errors.
- */
-public class ConfigurationException extends RepositoryException {
-
-    /**
-     * Creates a configuration exception.
-     *
-     * @param message configuration message
-     */
-    public ConfigurationException(String message) {
-        super(message);
-    }
-
-    /**
-     * Creates a configuration exception that is caused by another exception.
-     *
-     * @param message configuration error message
-     * @param cause root cause of the configuration error
-     */
-    public ConfigurationException(String message, Exception cause) {
-        super(message, cause);
-    }
-
-}
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java	(revision 1564627)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java	(working copy)
@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.jackrabbit.core.config;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Properties;
-
-import javax.naming.Context;
-
-/**
- * This class contains list of definitions for {@code DataSource} instances.
- */
-public class DataSourceConfig {
-
-    public static final String DRIVER = "driver";
-
-    public static final String URL = "url";
-
-    public static final String USER = "user";
-
-    public static final String PASSWORD = "password";
-
-    public static final String DB_TYPE = "databaseType";
-
-    public static final String VALIDATION_QUERY = "validationQuery";
-
-    public static final String MAX_POOL_SIZE = "maxPoolSize";
-
-    private final List<DataSourceDefinition> defs = new ArrayList<DataSourceDefinition>();
-
-    /**
-     * Adds a DataSourceDefinition from the given properties.
-     * 
-     * @param props the properties (key and values must be strings)
-     * @throws ConfigurationException on error
-     */
-    public void addDataSourceDefinition(String name, Properties props) throws ConfigurationException {
-        DataSourceDefinition def = new DataSourceDefinition(name, props);
-        for (DataSourceDefinition existing : defs) {
-            if (existing.getLogicalName().equals(def.getLogicalName())) {
-                throw new ConfigurationException("Duplicate logicalName for a DataSource: "
-                        + def.getLogicalName());
-            }
-        }
-        defs.add(def);
-    }
-
-    /**
-     * @return the unmodifiable list of the current {@link DataSourceDefinition}s
-     */
-    public List<DataSourceDefinition> getDefinitions() {
-        return Collections.unmodifiableList(defs);
-    }
-
-    /**
-     * The definition of a DataSource. 
-     */
-    public static final class DataSourceDefinition {
-
-        private static final List<String> allPropNames =
-            Arrays.asList(DRIVER, URL, USER, PASSWORD, DB_TYPE, VALIDATION_QUERY, MAX_POOL_SIZE);
-
-        private static final List<String> allJndiPropNames =
-            Arrays.asList(DRIVER, URL, USER, PASSWORD, DB_TYPE);
-
-        private final String logicalName;
-
-        private final String driver;
-
-        private final String url;
-
-        private final String user;
-
-        private final String password;
-
-        private final String dbType;
-
-        private final String validationQuery;
-
-        private final int maxPoolSize;
-
-        /**
-         * Creates a DataSourceDefinition from the given properties and 
-         * throws a {@link ConfigurationException} when the set of properties does not
-         * satisfy some validity constraints.
-         * 
-         * @param name the logical name of the data source
-         * @param props the properties (string keys and values)
-         * @throws ConfigurationException on error
-         */
-        public DataSourceDefinition(String name, Properties props) throws ConfigurationException {
-            this.logicalName = name;
-            this.driver = (String) props.getProperty(DRIVER);
-            this.url = (String) props.getProperty(URL);
-            this.user = (String) props.getProperty(USER);
-            this.password = (String) props.getProperty(PASSWORD);
-            this.dbType = (String) props.getProperty(DB_TYPE);
-            this.validationQuery = (String) props.getProperty(VALIDATION_QUERY);
-            try {
-                this.maxPoolSize = Integer.parseInt((String) props.getProperty(MAX_POOL_SIZE, "-1"));
-            } catch (NumberFormatException e) {
-                throw new ConfigurationException("failed to parse " + MAX_POOL_SIZE
-                        + " property for DataSource " + logicalName);
-            }
-            verify(props);
-        }
-
-        private void verify(Properties props) throws ConfigurationException {
-            // Check required properties
-            if (logicalName == null || "".equals(logicalName.trim())) {
-                throw new ConfigurationException("DataSource logical name must not be null or empty");
-            }
-            if (driver == null || "".equals(driver)) {
-                throw new ConfigurationException("DataSource driver must not be null or empty");
-            }
-            if (url == null || "".equals(url)) {
-                throw new ConfigurationException("DataSource URL must not be null or empty");
-            }
-            if (dbType == null || "".equals(dbType)) {
-                throw new ConfigurationException("DataSource databaseType must not be null or empty");
-            }
-            // Check unknown properties
-            for (Object propName : props.keySet()) {
-                if (!allPropNames.contains((String) propName)) {
-                    throw new ConfigurationException("Unknown DataSource property: " + propName);
-                }
-            }
-            // Check JNDI config:
-            if (isJndiConfig()) {
-                for (Object propName : props.keySet()) {
-                    if (!allJndiPropNames.contains((String) propName)) {
-                        throw new ConfigurationException("Property " + propName
-                                + " is not allowed for a DataSource obtained through JNDI"
-                                + ", DataSource logicalName = " + logicalName);
-                    }
-                }
-            }
-        }
-
-        private boolean isJndiConfig() throws ConfigurationException {
-            Class driverClass = null;
-            try {
-                if (driver.length() > 0) {
-                    driverClass = Class.forName(driver);
-                }
-            } catch (ClassNotFoundException e) {
-                throw new ConfigurationException("Could not load JDBC driver class " + driver, e);
-            }
-            return driverClass != null && Context.class.isAssignableFrom(driverClass);
-        }
-
-        /**
-         * @return the logicalName
-         */
-        public String getLogicalName() {
-            return logicalName;
-        }
-
-        /**
-         * @return the driver
-         */
-        public String getDriver() {
-            return driver;
-        }
-
-        /**
-         * @return the url
-         */
-        public String getUrl() {
-            return url;
-        }
-
-        /**
-         * @return the user
-         */
-        public String getUser() {
-            return user;
-        }
-
-        /**
-         * @return the dbType
-         */
-        public String getDbType() {
-            return dbType;
-        }
-
-        /**
-         * @return the password
-         */
-        public String getPassword() {
-            return password;
-        }
-
-        /**
-         * @return the validationQuery
-         */
-        public String getValidationQuery() {
-            return validationQuery;
-        }
-
-        /**
-         * @return the maxPoolSize
-         */
-        public int getMaxPoolSize() {
-            return maxPoolSize;
-        }
-    }
-}
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java	(revision 1564627)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java	(working copy)
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.jackrabbit.core.data;
-
-/**
- * Abstract data record base class. This base class contains only
- * a reference to the data identifier of the record and implements
- * the standard {@link Object} equality, hash code, and string
- * representation methods based on the identifier.
- */
-public abstract class AbstractDataRecord implements DataRecord {
-
-    /**
-     * The data store that contains this record.
-     */
-    private final AbstractDataStore store;
-
-    /**
-     * The binary identifier;
-     */
-    private final DataIdentifier identifier;
-
-    /**
-     * Creates a data record with the given identifier.
-     *
-     * @param identifier data identifier
-     */
-    public AbstractDataRecord(
-            AbstractDataStore store, DataIdentifier identifier) {
-        this.store = store;
-        this.identifier = identifier;
-    }
-
-    /**
-     * Returns the data identifier.
-     *
-     * @return data identifier
-     */
-    public DataIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    public String getReference() {
-        return store.getReferenceFromIdentifier(identifier);
-    }
-
-    /**
-     * Returns the string representation of the data identifier.
-     *
-     * @return string representation
-     */
-    public String toString() {
-        return identifier.toString();
-    }
-
-    /**
-     * Checks if the given object is a data record with the same identifier
-     * as this one.
-     *
-     * @param object other object
-     * @return true if the other object is a data record and has
-     *         the same identifier as this one, false otherwise
-     */
-    public boolean equals(Object object) {
-        return (object instanceof DataRecord)
-            && identifier.equals(((DataRecord) object).getIdentifier());
-    }
-
-    /**
-     * Returns the hash code of the data identifier.
-     *
-     * @return hash code
-     */
-    public int hashCode() {
-        return identifier.hashCode();
-    }
-
-}
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java
===================================================================
--- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java	(revision 1564627)
+++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java	(working copy)
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.jackrabbit.core.data;
-
-import java.security.SecureRandom;
-import javax.crypto.Mac;
-import javax.crypto.spec.SecretKeySpec;
-
-public abstract class AbstractDataStore implements DataStore {
-
-    private static final String ALGORITHM = "HmacSHA1";
-
-    /**
-     * Array of hexadecimal digits.
-     */
-    private static final char[] HEX = "0123456789abcdef".toCharArray();
-
-    /**
-     * Cached copy of the reference key of this data store. Initialized in
-     * {@link #getReferenceKey()} when the key is first accessed.
-     */
-    private byte[] referenceKey = null;
-
-    //---------------------------------------------------------< DataStore >--
-
-    public DataRecord getRecord(DataIdentifier identifier)
-            throws DataStoreException {
-        DataRecord record = getRecordIfStored(identifier);
-        if (record != null) {
-            return record;
-        } else {
-            throw new DataStoreException(
-                    "Record " + identifier + " does not exist");
-        }
-    }
-
-    public DataRecord getRecordFromReference(String reference)
-            throws DataStoreException {
-        if (reference != null) {
-            int colon = reference.indexOf(':');
-            if (colon != -1) {
-                DataIdentifier identifier =
-                        new DataIdentifier(reference.substring(0, colon));
-                if (reference.equals(getReferenceFromIdentifier(identifier))) {
-                    return getRecordIfStored(identifier);
-                }
-            }
-        }
-        return null;
-    }
-
-    //---------------------------------------------------------< protected >--
-
-    /**
-     * Returns the hex encoding of the given bytes.
-     *
-     * @param value value to be encoded
-     * @return encoded value
-     */
-    protected static String encodeHexString(byte[] value) {
-        char[] buffer = new char[value.length * 2];
-        for (int i = 0; i < value.length; i++) {
-            buffer[2 * i] = HEX[(value[i] >> 4) & 0x0f];
-            buffer[2 * i + 1] = HEX[value[i] & 0x0f];
-        }
-        return new String(buffer);
-    }
-
-    protected String getReferenceFromIdentifier(DataIdentifier identifier) {
-        try {
-            String id = identifier.toString();
-
-            Mac mac = Mac.getInstance(ALGORITHM);
-            mac.init(new SecretKeySpec(getReferenceKey(), ALGORITHM));
-            byte[] hash = mac.doFinal(id.getBytes("UTF-8"));
-
-            return id + ':' + encodeHexString(hash);
-        } catch (Exception e) {
-            // TODO: log a warning about this exception
-        }
-        return null;
-    }
-
-    /**
-     * Returns the reference key of this data store. If one does not already
-     * exist, it is automatically created in an implementation-specific way.
-     * The default implementation simply creates a temporary random key that's
-     * valid only until the data store gets restarted. Subclasses can override
-     * and/or decorate this method to support a more persistent reference key.
-     * 

- * This method is called only once during the lifetime of a data store - * instance and the return value is cached in memory, so it's no problem - * if the implementation is slow. - * - * @return reference key - * @throws DataStoreException if the key is not available - */ - protected byte[] getOrCreateReferenceKey() throws DataStoreException { - byte[] referenceKeyValue = new byte[256]; - new SecureRandom().nextBytes(referenceKeyValue); - return referenceKeyValue; - } - - //-----------------------------------------------------------< private >-- - - /** - * Returns the reference key of this data store. Synchronized to - * control concurrent access to the cached {@link #referenceKey} value. - * - * @return reference key - * @throws DataStoreException if the key is not available - */ - private synchronized byte[] getReferenceKey() throws DataStoreException { - if (referenceKey == null) { - referenceKey = getOrCreateReferenceKey(); - } - return referenceKey; - } - -} \ No newline at end of file Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java (working copy) @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.Serializable; - -/** - * Opaque data identifier used to identify records in a data store. - * All identifiers must be serializable and implement the standard - * object equality and hash code methods. - */ -public class DataIdentifier implements Serializable { - - /** - * Serial version UID. - */ - private static final long serialVersionUID = -9197191401131100016L; - - /** - * Data identifier. - */ - private final String identifier; - - /** - * Creates a data identifier from the given string. - * - * @param identifier data identifier - */ - public DataIdentifier(String identifier) { - this.identifier = identifier; - } - - //-------------------------------------------------------------< Object > - - /** - * Returns the identifier string. - * - * @return identifier string - */ - public String toString() { - return identifier; - } - - /** - * Checks if the given object is a data identifier and has the same - * string representation as this one. - * - * @param object other object - * @return true if the given object is the same identifier, - * false otherwise - */ - public boolean equals(Object object) { - return (object instanceof DataIdentifier) - && identifier.equals(object.toString()); - } - - /** - * Returns the hash code of the identifier string. 
- * - * @return hash code - */ - public int hashCode() { - return identifier.hashCode(); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java (working copy) @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.InputStream; - -/** - * Immutable data record that consists of a binary stream. - */ -public interface DataRecord { - - /** - * Returns the identifier of this record. - * - * @return data identifier - */ - DataIdentifier getIdentifier(); - - /** - * Returns a secure reference to this binary, or {@code null} if no such - * reference is available. - * - * @return binary reference, or {@code null} - */ - String getReference(); - - /** - * Returns the length of the binary stream in this record. 
- * - * @return length of the binary stream - * @throws DataStoreException if the record could not be accessed - */ - long getLength() throws DataStoreException; - - /** - * Returns the the binary stream in this record. - * - * @return binary stream - * @throws DataStoreException if the record could not be accessed - */ - InputStream getStream() throws DataStoreException; - - /** - * Returns the last modified of the record. - * - * @return last modified time of the binary stream - */ - long getLastModified(); -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStore.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStore.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStore.java (working copy) @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.InputStream; -import java.util.Iterator; - -import javax.jcr.RepositoryException; - -/** - * Append-only store for binary streams. A data store consists of a number - * of identifiable data records that each contain a distinct binary stream. 
- * New binary streams can be added to the data store, but existing streams - * are never removed or modified. - *

- * A data store should be fully thread-safe, i.e. it should be possible to - * add and access data records concurrently. Optimally even separate processes - * should be able to concurrently access the data store with zero interprocess - * synchronization. - */ -public interface DataStore { - - /** - * Check if a record for the given identifier exists, and return it if yes. - * If no record exists, this method returns null. - * - * @param identifier data identifier - * @return the record if found, and null if not - * @throws DataStoreException if the data store could not be accessed - */ - DataRecord getRecordIfStored(DataIdentifier identifier) - throws DataStoreException; - - /** - * Returns the identified data record. The given identifier should be - * the identifier of a previously saved data record. Since records are - * never removed, there should never be cases where the identified record - * is not found. Abnormal cases like that are treated as errors and - * handled by throwing an exception. - * - * @param identifier data identifier - * @return identified data record - * @throws DataStoreException if the data store could not be accessed, - * or if the given identifier is invalid - */ - DataRecord getRecord(DataIdentifier identifier) throws DataStoreException; - - /** - * Returns the record that matches the given binary reference. - * Returns {@code null} if the reference is invalid, for example if it - * points to a record that does not exist. - * - * @param reference binary reference - * @return matching record, or {@code null} - * @throws DataStoreException if the data store could not be accessed - */ - DataRecord getRecordFromReference(String reference) - throws DataStoreException; - - /** - * Creates a new data record. The given binary stream is consumed and - * a binary record containing the consumed stream is created and returned. - * If the same stream already exists in another record, then that record - * is returned instead of creating a new one. 
- *

- * The given stream is consumed and not closed by this - * method. It is the responsibility of the caller to close the stream. - * A typical call pattern would be: - *

-     *     InputStream stream = ...;
-     *     try {
-     *         record = store.addRecord(stream);
-     *     } finally {
-     *         stream.close();
-     *     }
-     * 
- * - * @param stream binary stream - * @return data record that contains the given stream - * @throws DataStoreException if the data store could not be accessed - */ - DataRecord addRecord(InputStream stream) throws DataStoreException; - - /** - * From now on, update the modified date of an object even when accessing it. - * Usually, the modified date is only updated when creating a new object, - * or when a new link is added to an existing object. When this setting is enabled, - * even getLength() will update the modified date. - * - * @param before - update the modified date to the current time if it is older than this value - */ - void updateModifiedDateOnAccess(long before); - - /** - * Delete objects that have a modified date older than the specified date. - * - * @param min the minimum time - * @return the number of data records deleted - * @throws DataStoreException - */ - int deleteAllOlderThan(long min) throws DataStoreException; - - /** - * Get all identifiers. - * - * @return an iterator over all DataIdentifier objects - * @throws DataStoreException if the list could not be read - */ - Iterator getAllIdentifiers() throws DataStoreException; - - /** - * Initialized the data store - * - * @param homeDir the home directory of the repository - * @throws RepositoryException - */ - void init(String homeDir) throws RepositoryException; - - /** - * Get the minimum size of an object that should be stored in this data store. - * Depending on the overhead and configuration, each store may return a different value. - * - * @return the minimum size in bytes - */ - int getMinRecordLength(); - - /** - * Close the data store - * - * @throws DataStoreException if a problem occurred - */ - void close() throws DataStoreException; - - /** - * Clear the in-use list. This is only used for testing to make the the garbage collection - * think that objects are no longer in use. 
- */ - void clearInUse(); - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java (working copy) @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import javax.jcr.RepositoryException; - -/** - * Exception thrown by the Data Store module. - */ -public class DataStoreException extends RepositoryException { - - /** - * Constructs a new instance of this class with the specified detail - * message. - * - * @param message the detailed message. - */ - public DataStoreException(String message) { - super(message); - } - - /** - * Constructs a new instance of this class with the specified detail - * message and root cause. - * - * @param message the detailed message. 
- * @param cause root failure cause - */ - public DataStoreException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Constructs a new instance of this class with the specified root cause. - * - * @param rootCause root failure cause - */ - public DataStoreException(Throwable rootCause) { - super(rootCause); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java (working copy) @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import javax.jcr.RepositoryException; - -/** - * Factory interface for creating {@link DataStore} instances. Used - * to decouple the repository internals from the repository configuration - * mechanism. - * - * @since Jackrabbit 1.5 - * @see JCR-1438 - */ -public interface DataStoreFactory { - - /** - * Creates, initializes, and returns a {@link DataStore} instance - * for use by the repository. 
Note that no information is passed from - * the client, so all required configuration information must be - * encapsulated in the factory. - * - * @return initialized data store - * @throws RepositoryException if the data store can not be created - */ - DataStore getDataStore() throws RepositoryException; - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java (working copy) @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.data.db; - -import org.apache.jackrabbit.core.data.AbstractDataRecord; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; - -import java.io.BufferedInputStream; -import java.io.InputStream; - -/** - * Data record that is stored in a database - */ -public class DbDataRecord extends AbstractDataRecord { - - protected final DbDataStore store; - protected final long length; - protected long lastModified; - - /** - * Creates a data record based on the given identifier and length. - * - * @param identifier data identifier - * @param length the length - * @param lastModified - */ - public DbDataRecord(DbDataStore store, DataIdentifier identifier, long length, long lastModified) { - super(store, identifier); - this.store = store; - this.length = length; - this.lastModified = lastModified; - } - - /** - * {@inheritDoc} - */ - public long getLength() throws DataStoreException { - lastModified = store.touch(getIdentifier(), lastModified); - return length; - } - - /** - * {@inheritDoc} - */ - public InputStream getStream() throws DataStoreException { - lastModified = store.touch(getIdentifier(), lastModified); - return new BufferedInputStream(new DbInputStream(store, getIdentifier())); - } - - /** - * {@inheritDoc} - */ - public long getLastModified() { - return lastModified; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java (working copy) @@ -1,1000 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data.db; - -import org.apache.commons.io.input.CountingInputStream; -import org.apache.jackrabbit.core.data.AbstractDataStore; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataRecord; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.MultiDataStoreAware; -import org.apache.jackrabbit.core.util.db.CheckSchemaOperation; -import org.apache.jackrabbit.core.util.db.ConnectionFactory; -import org.apache.jackrabbit.core.util.db.ConnectionHelper; -import org.apache.jackrabbit.core.util.db.DatabaseAware; -import org.apache.jackrabbit.core.util.db.DbUtility; -import org.apache.jackrabbit.core.util.db.StreamWrapper; -import org.apache.jackrabbit.util.Text; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.lang.ref.WeakReference; -import java.security.DigestInputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import 
java.util.UUID; -import java.util.WeakHashMap; - -import javax.jcr.RepositoryException; -import javax.sql.DataSource; - -/** - * A data store implementation that stores the records in a database using JDBC. - * - * Configuration: - *
- * <DataStore class="org.apache.jackrabbit.core.data.db.DbDataStore">
- *     <param name="{@link #setUrl(String) url}" value="jdbc:postgresql:test"/>
- *     <param name="{@link #setUser(String) user}" value="sa"/>
- *     <param name="{@link #setPassword(String) password}" value="sa"/>
- *     <param name="{@link #setDatabaseType(String) databaseType}" value="postgresql"/>
- *     <param name="{@link #setDriver(String) driver}" value="org.postgresql.Driver"/>
- *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
- *     <param name="{@link #setMaxConnections(int) maxConnections}" value="2"/>
- *     <param name="{@link #setCopyWhenReading(boolean) copyWhenReading}" value="true"/>
- *     <param name="{@link #setTablePrefix(String) tablePrefix}" value=""/>
- *     <param name="{@link #setSchemaObjectPrefix(String) schemaObjectPrefix}" value=""/>
- *     <param name="{@link #setSchemaCheckEnabled(String) schemaCheckEnabled}" value="true"/>
- * </DataStore>
- * 
- *

- * Only URL, user name and password usually need to be set. - * The remaining settings are generated using the database URL sub-protocol from the - * database type resource file. - *

- * JNDI can be used to get the connection. In this case, use the javax.naming.InitialContext as the driver, - * and the JNDI name as the URL. If the user and password are configured in the JNDI resource, - * they should not be configured here. Example JNDI settings: - *

- * <param name="driver" value="javax.naming.InitialContext" />
- * <param name="url" value="java:comp/env/jdbc/Test" />
- * 
- *

- * For Microsoft SQL Server 2005, there is a problem reading large BLOBs. You will need to use - * the JDBC driver version 1.2 or newer, and append ;responseBuffering=adaptive to the database URL. - * Don't append ;selectMethod=cursor, otherwise it can still run out of memory. - * Example database URL: jdbc:sqlserver://localhost:4220;DatabaseName=test;responseBuffering=adaptive - *

- * By default, the data is copied to a temp file when reading, to avoid problems when reading multiple - * blobs at the same time. - *

- * The tablePrefix can be used to specify a schema and / or catalog name: - * <param name="tablePrefix" value="ds."> - */ -public class DbDataStore extends AbstractDataStore - implements DatabaseAware, MultiDataStoreAware { - - /** - * The default value for the minimum object size. - */ - public static final int DEFAULT_MIN_RECORD_LENGTH = 100; - - /** - * Write to a temporary file to get the length (slow, but always works). - * This is the default setting. - */ - public static final String STORE_TEMP_FILE = "tempFile"; - - /** - * Call PreparedStatement.setBinaryStream(..., -1) - */ - public static final String STORE_SIZE_MINUS_ONE = "-1"; - - /** - * Call PreparedStatement.setBinaryStream(..., Integer.MAX_VALUE) - */ - public static final String STORE_SIZE_MAX = "max"; - - /** - * The digest algorithm used to uniquely identify records. - */ - protected static final String DIGEST = "SHA-1"; - - /** - * The prefix used for temporary objects. - */ - protected static final String TEMP_PREFIX = "TEMP_"; - - /** - * Logger instance - */ - private static Logger log = LoggerFactory.getLogger(DbDataStore.class); - - /** - * The minimum modified date. If a file is accessed (read or write) with a modified date - * older than this value, the modified date is updated to the current time. - */ - protected long minModifiedDate; - - /** - * The database URL used. - */ - protected String url; - - /** - * The database driver. - */ - protected String driver; - - /** - * The user name. - */ - protected String user; - - /** - * The password - */ - protected String password; - - /** - * The database type used. - */ - protected String databaseType; - - /** - * The minimum size of an object that should be stored in this data store. - */ - protected int minRecordLength = DEFAULT_MIN_RECORD_LENGTH; - - /** - * The prefix for the datastore table, empty by default. - */ - protected String tablePrefix = ""; - - /** - * The prefix of the table names. By default it is empty. 
- */ - protected String schemaObjectPrefix = ""; - - /** - * Whether the schema check must be done during initialization. - */ - private boolean schemaCheckEnabled = true; - - /** - * The logical name of the DataSource to use. - */ - protected String dataSourceName; - - /** - * This is the property 'table' - * in the [databaseType].properties file, initialized with the default value. - */ - protected String tableSQL = "DATASTORE"; - - /** - * This is the property 'createTable' - * in the [databaseType].properties file, initialized with the default value. - */ - protected String createTableSQL = - "CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB)"; - - /** - * This is the property 'insertTemp' - * in the [databaseType].properties file, initialized with the default value. - */ - protected String insertTempSQL = - "INSERT INTO ${tablePrefix}${table} VALUES(?, 0, ?, NULL)"; - - /** - * This is the property 'updateData' - * in the [databaseType].properties file, initialized with the default value. - */ - protected String updateDataSQL = - "UPDATE ${tablePrefix}${table} SET DATA=? WHERE ID=?"; - - /** - * This is the property 'updateLastModified' - * in the [databaseType].properties file, initialized with the default value. - */ - protected String updateLastModifiedSQL = - "UPDATE ${tablePrefix}${table} SET LAST_MODIFIED=? WHERE ID=? AND LAST_MODIFIED> inUse = - Collections.synchronizedMap(new WeakHashMap>()); - - /** - * The temporary identifiers that are currently in use. - */ - protected List temporaryInUse = Collections.synchronizedList(new ArrayList()); - - /** - * The {@link ConnectionHelper} set in the {@link #init(String)} method. - * */ - protected ConnectionHelper conHelper; - - /** - * The repositories {@link ConnectionFactory}. 
- */ - private ConnectionFactory connectionFactory; - - public void setConnectionFactory(ConnectionFactory connnectionFactory) { - this.connectionFactory = connnectionFactory; - } - - public DataRecord addRecord(InputStream stream) throws DataStoreException { - InputStream fileInput = null; - String tempId = null; - ResultSet rs = null; - try { - long tempModified; - while (true) { - try { - tempModified = System.currentTimeMillis(); - String id = UUID.randomUUID().toString(); - tempId = TEMP_PREFIX + id; - temporaryInUse.add(tempId); - // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? - rs = conHelper.query(selectMetaSQL, tempId); - boolean hasNext = rs.next(); - DbUtility.close(rs); - rs = null; - if (hasNext) { - // re-try in the very, very unlikely event that the row already exists - continue; - } - // INSERT INTO DATASTORE VALUES(?, 0, ?, NULL) - conHelper.exec(insertTempSQL, tempId, tempModified); - break; - } catch (Exception e) { - throw convert("Can not insert new record", e); - } finally { - DbUtility.close(rs); - // prevent that rs.close() is called again - rs = null; - } - } - MessageDigest digest = getDigest(); - DigestInputStream dIn = new DigestInputStream(stream, digest); - CountingInputStream in = new CountingInputStream(dIn); - StreamWrapper wrapper; - if (STORE_SIZE_MINUS_ONE.equals(storeStream)) { - wrapper = new StreamWrapper(in, -1); - } else if (STORE_SIZE_MAX.equals(storeStream)) { - wrapper = new StreamWrapper(in, Integer.MAX_VALUE); - } else if (STORE_TEMP_FILE.equals(storeStream)) { - File temp = moveToTempFile(in); - long length = temp.length(); - wrapper = new StreamWrapper(new TempFileInputStream(temp, true), length); - } else { - throw new DataStoreException("Unsupported stream store algorithm: " + storeStream); - } - // UPDATE DATASTORE SET DATA=? WHERE ID=? 
- conHelper.exec(updateDataSQL, wrapper, tempId); - long length = in.getByteCount(); - DataIdentifier identifier = - new DataIdentifier(encodeHexString(digest.digest())); - usesIdentifier(identifier); - String id = identifier.toString(); - long newModified; - while (true) { - newModified = System.currentTimeMillis(); - if (checkExisting(tempId, length, identifier)) { - touch(identifier, newModified); - conHelper.exec(deleteSQL, tempId); - break; - } - try { - // UPDATE DATASTORE SET ID=?, LENGTH=?, LAST_MODIFIED=? - // WHERE ID=? AND LAST_MODIFIED=? - int count = conHelper.update(updateSQL, - id, length, newModified, tempId, tempModified); - // If update count is 0, the last modified time of the - // temporary row was changed - which means we need to - // re-try using a new last modified date (a later one) - // because we need to ensure the new last modified date - // is _newer_ than the old (otherwise the garbage - // collection could delete rows) - if (count != 0) { - // update was successful - break; - } - } catch (SQLException e) { - // duplicate key (the row already exists) - repeat - // we use exception handling for flow control here, which is bad, - // but the alternative is to use UPDATE ... WHERE ... (SELECT ...) - // which could cause a deadlock in some databases - also, - // duplicate key will only occur if somebody else concurrently - // added the same record (which is very unlikely) - } - // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? 
- rs = conHelper.query(selectMetaSQL, tempId); - if (!rs.next()) { - // the row was deleted, which is unexpected / not allowed - String msg = - DIGEST + " temporary entry deleted: " + - " id=" + tempId + " length=" + length; - log.error(msg); - throw new DataStoreException(msg); - } - tempModified = rs.getLong(2); - DbUtility.close(rs); - rs = null; - } - usesIdentifier(identifier); - DbDataRecord record = new DbDataRecord(this, identifier, length, newModified); - return record; - } catch (Exception e) { - throw convert("Can not insert new record", e); - } finally { - if (tempId != null) { - temporaryInUse.remove(tempId); - } - DbUtility.close(rs); - if (fileInput != null) { - try { - fileInput.close(); - } catch (IOException e) { - throw convert("Can not close temporary file", e); - } - } - } - } - - /** - * Check if a row with this ID already exists. - * - * @return true if the row exists and the length matches - * @throw DataStoreException if a row exists, but the length is different - */ - private boolean checkExisting(String tempId, long length, DataIdentifier identifier) throws DataStoreException, SQLException { - String id = identifier.toString(); - // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? - ResultSet rs = null; - try { - rs = conHelper.query(selectMetaSQL, id); - if (rs.next()) { - long oldLength = rs.getLong(1); - long lastModified = rs.getLong(2); - if (oldLength != length) { - String msg = - DIGEST + " collision: temp=" + tempId - + " id=" + id + " length=" + length - + " oldLength=" + oldLength; - log.error(msg); - throw new DataStoreException(msg); - } - DbUtility.close(rs); - rs = null; - touch(identifier, lastModified); - // row already exists - conHelper.exec(deleteSQL, tempId); - return true; - } - } finally { - DbUtility.close(rs); - } - return false; - } - - /** - * Creates a temp file and copies the data there. - * The input stream is closed afterwards. 
- * - * @param in the input stream - * @return the file - * @throws IOException - */ - private File moveToTempFile(InputStream in) throws IOException { - File temp = File.createTempFile("dbRecord", null); - TempFileInputStream.writeToFileAndClose(in, temp); - return temp; - } - - public synchronized void deleteRecord(DataIdentifier identifier) throws DataStoreException { - try { - conHelper.exec(deleteSQL, identifier.toString()); - } catch (Exception e) { - throw convert("Can not delete record", e); - } - } - - public synchronized int deleteAllOlderThan(long min) throws DataStoreException { - try { - ArrayList touch = new ArrayList(); - ArrayList ids = new ArrayList(inUse.keySet()); - for (DataIdentifier identifier: ids) { - if (identifier != null) { - touch.add(identifier.toString()); - } - } - touch.addAll(temporaryInUse); - for (String key : touch) { - updateLastModifiedDate(key, 0); - } - // DELETE FROM DATASTORE WHERE LAST_MODIFIED getAllIdentifiers() throws DataStoreException { - ArrayList list = new ArrayList(); - ResultSet rs = null; - try { - // SELECT ID FROM DATASTORE - rs = conHelper.query(selectAllSQL); - while (rs.next()) { - String id = rs.getString(1); - if (!id.startsWith(TEMP_PREFIX)) { - DataIdentifier identifier = new DataIdentifier(id); - list.add(identifier); - } - } - log.debug("Found " + list.size() + " identifiers."); - return list.iterator(); - } catch (Exception e) { - throw convert("Can not read records", e); - } finally { - DbUtility.close(rs); - } - } - - public int getMinRecordLength() { - return minRecordLength; - } - - /** - * Set the minimum object length. - * The maximum value is around 32000. 
- * - * @param minRecordLength the length - */ - public void setMinRecordLength(int minRecordLength) { - this.minRecordLength = minRecordLength; - } - - public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { - usesIdentifier(identifier); - ResultSet rs = null; - try { - String id = identifier.toString(); - // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID = ? - rs = conHelper.query(selectMetaSQL, id); - if (!rs.next()) { - return null; - } - long length = rs.getLong(1); - long lastModified = rs.getLong(2); - DbUtility.close(rs); - rs = null; - lastModified = touch(identifier, lastModified); - return new DbDataRecord(this, identifier, length, lastModified); - } catch (Exception e) { - throw convert("Can not read identifier " + identifier, e); - } finally { - DbUtility.close(rs); - } - } - - /** - * Open the input stream. This method sets those fields of the caller - * that need to be closed once the input stream is read. - * - * @param inputStream the database input stream object - * @param identifier data identifier - * @throws DataStoreException if the data store could not be accessed, - * or if the given identifier is invalid - */ - InputStream openStream(DbInputStream inputStream, DataIdentifier identifier) throws DataStoreException { - ResultSet rs = null; - try { - // SELECT ID, DATA FROM DATASTORE WHERE ID = ? 
- rs = conHelper.query(selectDataSQL, identifier.toString()); - if (!rs.next()) { - throw new DataStoreException("Record not found: " + identifier); - } - InputStream stream = rs.getBinaryStream(2); - if (stream == null) { - stream = new ByteArrayInputStream(new byte[0]); - DbUtility.close(rs); - } else if (copyWhenReading) { - // If we copy while reading, create a temp file and close the stream - File temp = moveToTempFile(stream); - stream = new BufferedInputStream(new TempFileInputStream(temp, false)); - DbUtility.close(rs); - } else { - stream = new BufferedInputStream(stream); - inputStream.setResultSet(rs); - } - return stream; - } catch (Exception e) { - DbUtility.close(rs); - throw convert("Retrieving database resource ", e); - } - } - - public synchronized void init(String homeDir) throws DataStoreException { - try { - initDatabaseType(); - - conHelper = createConnectionHelper(getDataSource()); - - if (isSchemaCheckEnabled()) { - createCheckSchemaOperation().run(); - } - } catch (Exception e) { - throw convert("Can not init data store, driver=" + driver + " url=" + url + " user=" + user + - " schemaObjectPrefix=" + schemaObjectPrefix + " tableSQL=" + tableSQL + " createTableSQL=" + createTableSQL, e); - } - } - - private DataSource getDataSource() throws Exception { - if (getDataSourceName() == null || "".equals(getDataSourceName())) { - return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword()); - } else { - return connectionFactory.getDataSource(dataSourceName); - } - } - - /** - * This method is called from the {@link #init(String)} method of this class and returns a - * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may - * override it to return a specialized connection helper. 
- * - * @param dataSrc the {@link DataSource} of this persistence manager - * @return a {@link ConnectionHelper} - * @throws Exception on error - */ - protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception { - return new ConnectionHelper(dataSrc, false); - } - - /** - * This method is called from {@link #init(String)} after the - * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}. - * - * @return a new {@link CheckSchemaOperation} instance - */ - protected final CheckSchemaOperation createCheckSchemaOperation() { - String tableName = tablePrefix + schemaObjectPrefix + tableSQL; - return new CheckSchemaOperation(conHelper, new ByteArrayInputStream(createTableSQL.getBytes()), tableName); - } - - protected void initDatabaseType() throws DataStoreException { - boolean failIfNotFound = false; - if (databaseType == null) { - if (dataSourceName != null) { - try { - databaseType = connectionFactory.getDataBaseType(dataSourceName); - } catch (RepositoryException e) { - throw new DataStoreException(e); - } - } else { - if (!url.startsWith("jdbc:")) { - return; - } - int start = "jdbc:".length(); - int end = url.indexOf(':', start); - databaseType = url.substring(start, end); - } - } else { - failIfNotFound = true; - } - - InputStream in = - DbDataStore.class.getResourceAsStream(databaseType + ".properties"); - if (in == null) { - if (failIfNotFound) { - String msg = - "Configuration error: The resource '" + databaseType - + ".properties' could not be found;" - + " Please verify the databaseType property"; - log.debug(msg); - throw new DataStoreException(msg); - } else { - return; - } - } - Properties prop = new Properties(); - try { - try { - prop.load(in); - } finally { - in.close(); - } - } catch (IOException e) { - String msg = "Configuration error: Could not read properties '" + databaseType + ".properties'"; - log.debug(msg); - throw new DataStoreException(msg, e); - } - if (driver == 
null) { - driver = getProperty(prop, "driver", driver); - } - tableSQL = getProperty(prop, "table", tableSQL); - createTableSQL = getProperty(prop, "createTable", createTableSQL); - insertTempSQL = getProperty(prop, "insertTemp", insertTempSQL); - updateDataSQL = getProperty(prop, "updateData", updateDataSQL); - updateLastModifiedSQL = getProperty(prop, "updateLastModified", updateLastModifiedSQL); - updateSQL = getProperty(prop, "update", updateSQL); - deleteSQL = getProperty(prop, "delete", deleteSQL); - deleteOlderSQL = getProperty(prop, "deleteOlder", deleteOlderSQL); - selectMetaSQL = getProperty(prop, "selectMeta", selectMetaSQL); - selectAllSQL = getProperty(prop, "selectAll", selectAllSQL); - selectDataSQL = getProperty(prop, "selectData", selectDataSQL); - storeStream = getProperty(prop, "storeStream", storeStream); - if (!STORE_SIZE_MINUS_ONE.equals(storeStream) - && !STORE_TEMP_FILE.equals(storeStream) - && !STORE_SIZE_MAX.equals(storeStream)) { - String msg = "Unsupported Stream store mechanism: " + storeStream - + " supported are: " + STORE_SIZE_MINUS_ONE + ", " - + STORE_TEMP_FILE + ", " + STORE_SIZE_MAX; - log.debug(msg); - throw new DataStoreException(msg); - } - } - - /** - * Get the expanded property value. The following placeholders are supported: - * ${table}: the table name (the default is DATASTORE) and - * ${tablePrefix}: tablePrefix plus schemaObjectPrefix as set in the configuration - * - * @param prop the properties object - * @param key the key - * @param defaultValue the default value - * @return the property value (placeholders are replaced) - */ - protected String getProperty(Properties prop, String key, String defaultValue) { - String sql = prop.getProperty(key, defaultValue); - sql = Text.replace(sql, "${table}", tableSQL).trim(); - sql = Text.replace(sql, "${tablePrefix}", tablePrefix + schemaObjectPrefix).trim(); - return sql; - } - - /** - * Convert an exception to a data store exception. 
- * - * @param cause the message - * @param e the root cause - * @return the data store exception - */ - protected DataStoreException convert(String cause, Exception e) { - log.warn(cause, e); - if (e instanceof DataStoreException) { - return (DataStoreException) e; - } else { - return new DataStoreException(cause, e); - } - } - - public void updateModifiedDateOnAccess(long before) { - log.debug("Update modifiedDate on access before " + before); - minModifiedDate = before; - } - - /** - * Update the modified date of an entry if required. - * - * @param identifier the entry identifier - * @param lastModified the current last modified date - * @return the new modified date - */ - long touch(DataIdentifier identifier, long lastModified) throws DataStoreException { - usesIdentifier(identifier); - return updateLastModifiedDate(identifier.toString(), lastModified); - } - - private long updateLastModifiedDate(String key, long lastModified) throws DataStoreException { - if (lastModified < minModifiedDate) { - long now = System.currentTimeMillis(); - try { - // UPDATE DATASTORE SET LAST_MODIFIED = ? WHERE ID = ? AND LAST_MODIFIED < ? - conHelper.update(updateLastModifiedSQL, now, key, now); - return now; - } catch (Exception e) { - throw convert("Can not update lastModified", e); - } - } - return lastModified; - } - - /** - * Get the database type (if set). - * @return the database type - */ - public String getDatabaseType() { - return databaseType; - } - - /** - * Set the database type. By default the sub-protocol of the JDBC database URL is used if it is not set. - * It must match the resource file [databaseType].properties. Example: mysql. - * - * @param databaseType - */ - public void setDatabaseType(String databaseType) { - this.databaseType = databaseType; - } - - /** - * Get the database driver - * - * @return the driver - */ - public String getDriver() { - return driver; - } - - /** - * Set the database driver class name. 
- * If not set, the default driver class name for the database type is used, - * as set in the [databaseType].properties resource; key 'driver'. - * - * @param driver - */ - public void setDriver(String driver) { - this.driver = driver; - } - - /** - * Get the password. - * - * @return the password - */ - public String getPassword() { - return password; - } - - /** - * Set the password. - * - * @param password - */ - public void setPassword(String password) { - this.password = password; - } - - /** - * Get the database URL. - * - * @return the URL - */ - public String getUrl() { - return url; - } - - /** - * Set the database URL. - * Example: jdbc:postgresql:test - * - * @param url - */ - public void setUrl(String url) { - this.url = url; - } - - /** - * Get the user name. - * - * @return the user name - */ - public String getUser() { - return user; - } - - /** - * Set the user name. - * - * @param user - */ - public void setUser(String user) { - this.user = user; - } - - /** - * @return whether the schema check is enabled - */ - public final boolean isSchemaCheckEnabled() { - return schemaCheckEnabled; - } - - /** - * @param enabled set whether the schema check is enabled - */ - public final void setSchemaCheckEnabled(boolean enabled) { - schemaCheckEnabled = enabled; - } - - public synchronized void close() throws DataStoreException { - // nothing to do - } - - protected void usesIdentifier(DataIdentifier identifier) { - inUse.put(identifier, new WeakReference(identifier)); - } - - public void clearInUse() { - inUse.clear(); - } - - protected synchronized MessageDigest getDigest() throws DataStoreException { - try { - return MessageDigest.getInstance(DIGEST); - } catch (NoSuchAlgorithmException e) { - throw convert("No such algorithm: " + DIGEST, e); - } - } - - /** - * Get the maximum number of concurrent connections. - * - * @deprecated - * @return the maximum number of connections. 
- */ - public int getMaxConnections() { - return -1; - } - - /** - * Set the maximum number of concurrent connections in the pool. - * At least 3 connections are required if the garbage collection process is used. - * - *@deprecated - * @param maxConnections the new value - */ - public void setMaxConnections(int maxConnections) { - // no effect - } - - /** - * Is a stream copied to a temporary file before returning? - * - * @return the setting - */ - public boolean getCopyWhenReading() { - return copyWhenReading; - } - - /** - * The the copy setting. If enabled, - * a stream is always copied to a temporary file when reading a stream. - * - * @param copyWhenReading the new setting - */ - public void setCopyWhenReading(boolean copyWhenReading) { - this.copyWhenReading = copyWhenReading; - } - - /** - * Get the table prefix. - * - * @return the table prefix. - */ - public String getTablePrefix() { - return tablePrefix; - } - - /** - * Set the new table prefix. The default is empty. - * The table name is constructed like this: - * ${tablePrefix}${schemaObjectPrefix}${tableName} - * - * @param tablePrefix the new value - */ - public void setTablePrefix(String tablePrefix) { - this.tablePrefix = tablePrefix; - } - - /** - * Get the schema prefix. - * - * @return the schema object prefix - */ - public String getSchemaObjectPrefix() { - return schemaObjectPrefix; - } - - /** - * Set the schema object prefix. The default is empty. 
- * The table name is constructed like this: - * ${tablePrefix}${schemaObjectPrefix}${tableName} - * - * @param schemaObjectPrefix the new prefix - */ - public void setSchemaObjectPrefix(String schemaObjectPrefix) { - this.schemaObjectPrefix = schemaObjectPrefix; - } - - public String getDataSourceName() { - return dataSourceName; - } - - public void setDataSourceName(String dataSourceName) { - this.dataSourceName = dataSourceName; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java (working copy) @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.data.db; - -import java.io.EOFException; -import java.io.IOException; -import java.sql.ResultSet; - -import org.apache.commons.io.input.AutoCloseInputStream; -import org.apache.jackrabbit.core.data.DataIdentifier; -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.util.db.DbUtility; - -/** - * This class represents an input stream backed by a database. The database - * objects are only acquired when reading from the stream, and stay open until - * the stream is closed, fully read, or garbage collected. - *

- * This class does not support mark/reset. It is always to be wrapped - * using a BufferedInputStream. - */ -public class DbInputStream extends AutoCloseInputStream { - - protected DbDataStore store; - protected DataIdentifier identifier; - protected boolean endOfStream; - - protected ResultSet rs; - - /** - * Create a database input stream for the given identifier. - * Database access is delayed until the first byte is read from the stream. - * - * @param store the database data store - * @param identifier the data identifier - */ - protected DbInputStream(DbDataStore store, DataIdentifier identifier) { - super(null); - this.store = store; - this.identifier = identifier; - } - - /** - * Open the stream if required. - * - * @throws IOException - */ - protected void openStream() throws IOException { - if (endOfStream) { - throw new EOFException(); - } - if (in == null) { - try { - in = store.openStream(this, identifier); - } catch (DataStoreException e) { - IOException e2 = new IOException(e.getMessage()); - e2.initCause(e); - throw e2; - } - } - } - - /** - * {@inheritDoc} - * When the stream is consumed, the database objects held by the instance are closed. - */ - public int read() throws IOException { - if (endOfStream) { - return -1; - } - openStream(); - int c = in.read(); - if (c == -1) { - endOfStream = true; - close(); - } - return c; - } - - /** - * {@inheritDoc} - * When the stream is consumed, the database objects held by the instance are closed. - */ - public int read(byte[] b) throws IOException { - return read(b, 0, b.length); - } - - /** - * {@inheritDoc} - * When the stream is consumed, the database objects held by the instance are closed. 
- */ - public int read(byte[] b, int off, int len) throws IOException { - if (endOfStream) { - return -1; - } - openStream(); - int c = in.read(b, off, len); - if (c == -1) { - endOfStream = true; - close(); - } - return c; - } - - /** - * {@inheritDoc} - * When the stream is consumed, the database objects held by the instance are closed. - */ - public void close() throws IOException { - if (in != null) { - in.close(); - in = null; - // some additional database objects - // may need to be closed - if (rs != null) { - DbUtility.close(rs); - rs = null; - } - } - } - - /** - * {@inheritDoc} - */ - public long skip(long n) throws IOException { - if (endOfStream) { - return -1; - } - openStream(); - return in.skip(n); - } - - /** - * {@inheritDoc} - */ - public int available() throws IOException { - if (endOfStream) { - return 0; - } - openStream(); - return in.available(); - } - - /** - * This method does nothing. - */ - public void mark(int readlimit) { - // do nothing - } - - /** - * This method does nothing. - */ - public void reset() throws IOException { - // do nothing - } - - /** - * Check whether mark and reset are supported. - * - * @return false - */ - public boolean markSupported() { - return false; - } - - /** - * Set the result set of this input stream. This object must be closed once - * the stream is closed. - * - * @param rs the result set - */ - void setResultSet(ResultSet rs) { - this.rs = rs; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java (working copy) @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data.db; - -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.util.db.ConnectionHelper; -import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper; - -/** - * The Derby data store closes the database when the data store is closed - * (embedded databases only). 
- */ -public class DerbyDataStore extends DbDataStore { - - /** - * {@inheritDoc} - */ - @Override - protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception { - return new DerbyConnectionHelper(dataSrc, false); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized void close() throws DataStoreException { - super.close(); - try { - ((DerbyConnectionHelper) conHelper).shutDown(getDriver()); - } catch (SQLException e) { - throw new DataStoreException(e); - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java (working copy) @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.data.db; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.commons.io.IOUtils; -import org.apache.commons.io.input.AutoCloseInputStream; - -/** - * An input stream from a temporary file. The file is deleted when the stream is - * closed, fully read, or garbage collected. - *

- * This class does not support mark/reset. It is always to be wrapped - * using a BufferedInputStream. - */ -public class TempFileInputStream extends AutoCloseInputStream { - - private final File file; - private boolean closed; - private boolean delayedResourceCleanup = true; - - /** - * Copy the data to a file and close the input stream afterwards. - * - * @param in the input stream - * @param file the target file - * @return the size of the file - */ - public static long writeToFileAndClose(InputStream in, File file) throws IOException { - OutputStream out = new FileOutputStream(file); - IOUtils.copy(in, out); - out.close(); - in.close(); - return file.length(); - } - - /** - * Construct a new temporary file input stream. - * The file is deleted if the input stream is closed or fully read and - * delayedResourceCleanup was set to true. Otherwise you must call {@link #deleteFile()}. - * Deleting is only attempted once. - * - * @param file the temporary file - * @param delayedResourceCleanup - */ - public TempFileInputStream(File file, boolean delayedResourceCleanup) throws FileNotFoundException { - super(new BufferedInputStream(new FileInputStream(file))); - this.file = file; - this.delayedResourceCleanup = delayedResourceCleanup; - } - - public File getFile() { - return file; - } - - public void deleteFile() { - file.delete(); - } - - private int closeIfEOF(int read) throws IOException { - if (read < 0) { - close(); - } - return read; - } - - public void close() throws IOException { - if (!closed) { - in.close(); - if (!delayedResourceCleanup) { - deleteFile(); - } - closed = true; - } - } - - public int available() throws IOException { - return in.available(); - } - - /** - * This method does nothing. - */ - public void mark(int readlimit) { - // do nothing - } - - /** - * Check whether mark and reset are supported. 
- * - * @return false - */ - public boolean markSupported() { - return false; - } - - public long skip(long n) throws IOException { - return in.skip(n); - } - - public void reset() throws IOException { - in.reset(); - } - - public int read(byte[] b, int off, int len) throws IOException { - if (closed) { - return -1; - } - return closeIfEOF(in.read(b, off, len)); - } - - public int read(byte[] b) throws IOException { - if (closed) { - return -1; - } - return closeIfEOF(in.read(b)); - } - - public int read() throws IOException { - if (closed) { - return -1; - } - return closeIfEOF(in.read()); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java (working copy) @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; - -/** - * Data record that is based on a normal file. 
- */ -public class FileDataRecord extends AbstractDataRecord { - - /** - * The file that contains the binary stream. - */ - private final File file; - - /** - * Creates a data record based on the given identifier and file. - * - * @param identifier data identifier - * @param file file that contains the binary stream - */ - public FileDataRecord( - AbstractDataStore store, DataIdentifier identifier, File file) { - super(store, identifier); - assert file.isFile(); - this.file = file; - } - - /** - * {@inheritDoc} - */ - public long getLength() { - return file.length(); - } - - /** - * {@inheritDoc} - */ - public InputStream getStream() throws DataStoreException { - try { - return new LazyFileInputStream(file); - } catch (IOException e) { - throw new DataStoreException("Error opening input stream of " + file.getAbsolutePath(), e); - } - } - - /** - * {@inheritDoc} - */ - public long getLastModified() { - return file.lastModified(); - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java (working copy) @@ -1,481 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.RandomAccessFile; -import java.lang.ref.WeakReference; -import java.security.DigestOutputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.WeakHashMap; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Simple file-based data store. Data records are stored as normal files - * named using a message digest of the contained binary stream. - * - * Configuration: - *

- * <DataStore class="org.apache.jackrabbit.core.data.FileDataStore">
- *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
- *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
- * </DataStore>
- * 
- *

- * If the directory is not set, the directory <repository home>/repository/datastore is used. - *

- * A three level directory structure is used to avoid placing too many - * files in a single directory. The chosen structure is designed to scale - * up to billions of distinct records. - *

- * This implementation relies on the underlying file system to support - * atomic O(1) move operations with {@link File#renameTo(File)}. - */ -public class FileDataStore extends AbstractDataStore - implements MultiDataStoreAware { - - /** - * Logger instance - */ - private static Logger log = LoggerFactory.getLogger(FileDataStore.class); - - /** - * The digest algorithm used to uniquely identify records. - */ - private static final String DIGEST = "SHA-1"; - - /** - * The default value for the minimum object size. - */ - private static final int DEFAULT_MIN_RECORD_LENGTH = 100; - - /** - * The maximum last modified time resolution of the file system. - */ - private static final int ACCESS_TIME_RESOLUTION = 2000; - - /** - * Name of the directory used for temporary files. - * Must be at least 3 characters. - */ - private static final String TMP = "tmp"; - - /** - * The minimum modified date. If a file is accessed (read or write) with a modified date - * older than this value, the modified date is updated to the current time. - */ - private long minModifiedDate; - - /** - * The directory that contains all the data record files. The structure - * of content within this directory is controlled by this class. - */ - private File directory; - - /** - * The name of the directory that contains all the data record files. The structure - * of content within this directory is controlled by this class. - */ - private String path; - - /** - * The minimum size of an object that should be stored in this data store. - */ - private int minRecordLength = DEFAULT_MIN_RECORD_LENGTH; - - /** - * All data identifiers that are currently in use are in this set until they are garbage collected. - */ - protected Map> inUse = - Collections.synchronizedMap(new WeakHashMap>()); - - /** - * Initialized the data store. - * If the path is not set, <repository home>/repository/datastore is used. - * This directory is automatically created if it does not yet exist. 
- * - * @param homeDir - */ - public void init(String homeDir) { - if (path == null) { - path = homeDir + "/repository/datastore"; - } - directory = new File(path); - directory.mkdirs(); - } - - /** - * Get a data record for the given identifier. - * - * @param identifier the identifier - * @return the data record or null - */ - public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { - File file = getFile(identifier); - synchronized (this) { - if (!file.exists()) { - return null; - } - if (minModifiedDate != 0) { - // only check when running garbage collection - if (getLastModified(file) < minModifiedDate) { - setLastModified(file, System.currentTimeMillis() + ACCESS_TIME_RESOLUTION); - } - } - usesIdentifier(identifier); - return new FileDataRecord(this, identifier, file); - } - } - - private void usesIdentifier(DataIdentifier identifier) { - inUse.put(identifier, new WeakReference(identifier)); - } - - /** - * Creates a new data record. - * The stream is first consumed and the contents are saved in a temporary file - * and the SHA-1 message digest of the stream is calculated. If a - * record with the same SHA-1 digest (and length) is found then it is - * returned. Otherwise the temporary file is moved in place to become - * the new data record that gets returned. 
- * - * @param input binary stream - * @return data record that contains the given stream - * @throws DataStoreException if the record could not be created - */ - public DataRecord addRecord(InputStream input) throws DataStoreException { - File temporary = null; - try { - temporary = newTemporaryFile(); - DataIdentifier tempId = new DataIdentifier(temporary.getName()); - usesIdentifier(tempId); - // Copy the stream to the temporary file and calculate the - // stream length and the message digest of the stream - long length = 0; - MessageDigest digest = MessageDigest.getInstance(DIGEST); - OutputStream output = new DigestOutputStream( - new FileOutputStream(temporary), digest); - try { - length = IOUtils.copyLarge(input, output); - } finally { - output.close(); - } - DataIdentifier identifier = - new DataIdentifier(encodeHexString(digest.digest())); - File file; - - synchronized (this) { - // Check if the same record already exists, or - // move the temporary file in place if needed - usesIdentifier(identifier); - file = getFile(identifier); - if (!file.exists()) { - File parent = file.getParentFile(); - parent.mkdirs(); - if (temporary.renameTo(file)) { - // no longer need to delete the temporary file - temporary = null; - } else { - throw new IOException( - "Can not rename " + temporary.getAbsolutePath() - + " to " + file.getAbsolutePath() - + " (media read only?)"); - } - } else { - long now = System.currentTimeMillis(); - if (getLastModified(file) < now + ACCESS_TIME_RESOLUTION) { - setLastModified(file, now + ACCESS_TIME_RESOLUTION); - } - } - if (file.length() != length) { - // Sanity checks on the record file. These should never fail, - // but better safe than sorry... 
- if (!file.isFile()) { - throw new IOException("Not a file: " + file); - } - throw new IOException(DIGEST + " collision: " + file); - } - } - // this will also make sure that - // tempId is not garbage collected until here - inUse.remove(tempId); - return new FileDataRecord(this, identifier, file); - } catch (NoSuchAlgorithmException e) { - throw new DataStoreException(DIGEST + " not available", e); - } catch (IOException e) { - throw new DataStoreException("Could not add record", e); - } finally { - if (temporary != null) { - temporary.delete(); - } - } - } - - /** - * Returns the identified file. This method implements the pattern - * used to avoid problems with too many files in a single directory. - *

- * No sanity checks are performed on the given identifier. - * - * @param identifier data identifier - * @return identified file - */ - private File getFile(DataIdentifier identifier) { - usesIdentifier(identifier); - String string = identifier.toString(); - File file = directory; - file = new File(file, string.substring(0, 2)); - file = new File(file, string.substring(2, 4)); - file = new File(file, string.substring(4, 6)); - return new File(file, string); - } - - /** - * Returns a unique temporary file to be used for creating a new - * data record. - * - * @return temporary file - * @throws IOException - */ - private File newTemporaryFile() throws IOException { - // the directory is already created in the init method - return File.createTempFile(TMP, null, directory); - } - - public void updateModifiedDateOnAccess(long before) { - minModifiedDate = before; - } - - public void deleteRecord(DataIdentifier identifier) - throws DataStoreException { - File file = getFile(identifier); - synchronized (this) { - if (file.exists()) { - if (!file.delete()) { - log.warn("Failed to delete file " + file.getAbsolutePath()); - } - } - } - } - - public int deleteAllOlderThan(long min) { - int count = 0; - for (File file : directory.listFiles()) { - if (file.isDirectory()) { // skip top-level files - count += deleteOlderRecursive(file, min); - } - } - return count; - } - - private int deleteOlderRecursive(File file, long min) { - int count = 0; - if (file.isFile() && file.exists() && file.canWrite()) { - synchronized (this) { - long lastModified; - try { - lastModified = getLastModified(file); - } catch (DataStoreException e) { - log.warn("Failed to read modification date; file not deleted", e); - // don't delete the file, since the lastModified date is uncertain - lastModified = min; - } - if (lastModified < min) { - DataIdentifier id = new DataIdentifier(file.getName()); - if (!inUse.containsKey(id)) { - if (log.isInfoEnabled()) { - log.info("Deleting old file " + 
file.getAbsolutePath() + - " modified: " + new Timestamp(lastModified).toString() + - " length: " + file.length()); - } - if (!file.delete()) { - log.warn("Failed to delete old file " + file.getAbsolutePath()); - } - count++; - } - } - } - } else if (file.isDirectory()) { - File[] list = file.listFiles(); - if (list != null) { - for (File f: list) { - count += deleteOlderRecursive(f, min); - } - } - - // JCR-1396: FileDataStore Garbage Collector and empty directories - // Automatic removal of empty directories (but not the root!) - synchronized (this) { - list = file.listFiles(); - if (list != null && list.length == 0) { - file.delete(); - } - } - } - return count; - } - - private void listRecursive(List list, File file) { - File[] files = file.listFiles(); - if (files != null) { - for (File f : files) { - if (f.isDirectory()) { - listRecursive(list, f); - } else { - list.add(f); - } - } - } - } - - public Iterator getAllIdentifiers() { - ArrayList files = new ArrayList(); - for (File file : directory.listFiles()) { - if (file.isDirectory()) { // skip top-level files - listRecursive(files, file); - } - } - - ArrayList identifiers = new ArrayList(); - for (File f: files) { - String name = f.getName(); - identifiers.add(new DataIdentifier(name)); - } - log.debug("Found " + identifiers.size() + " identifiers."); - return identifiers.iterator(); - } - - public void clearInUse() { - inUse.clear(); - } - - /** - * Get the name of the directory where this data store keeps the files. - * - * @return the full path name - */ - public String getPath() { - return path; - } - - /** - * Set the name of the directory where this data store keeps the files. - * - * @param directoryName the path name - */ - public void setPath(String directoryName) { - this.path = directoryName; - } - - public int getMinRecordLength() { - return minRecordLength; - } - - /** - * Set the minimum object length. 
- * - * @param minRecordLength the length - */ - public void setMinRecordLength(int minRecordLength) { - this.minRecordLength = minRecordLength; - } - - public void close() { - // nothing to do - } - - //---------------------------------------------------------< protected >-- - - @Override - protected byte[] getOrCreateReferenceKey() throws DataStoreException { - File file = new File(directory, "reference.key"); - try { - if (file.exists()) { - return FileUtils.readFileToByteArray(file); - } else { - byte[] key = super.getOrCreateReferenceKey(); - FileUtils.writeByteArrayToFile(file, key); - return key; - } - } catch (IOException e) { - throw new DataStoreException( - "Unable to access reference key file " + file.getPath(), e); - } - } - - //-----------------------------------------------------------< private >-- - - /** - * Get the last modified date of a file. - * - * @param file the file - * @return the last modified date - * @throws DataStoreException if reading fails - */ - private static long getLastModified(File file) throws DataStoreException { - long lastModified = file.lastModified(); - if (lastModified == 0) { - throw new DataStoreException("Failed to read record modified date: " + file.getAbsolutePath()); - } - return lastModified; - } - - /** - * Set the last modified date of a file, if the file is writable. 
- * - * @param file the file - * @param time the new last modified date - * @throws DataStoreException if the file is writable but modifying the date fails - */ - private static void setLastModified(File file, long time) throws DataStoreException { - if (!file.setLastModified(time)) { - if (!file.canWrite()) { - // if we can't write to the file, so garbage collection will also not delete it - // (read only files or file systems) - return; - } - try { - // workaround for Windows: if the file is already open for reading - // (in this or another process), then setting the last modified date - // doesn't work - see also JCR-2872 - RandomAccessFile r = new RandomAccessFile(file, "rw"); - try { - r.setLength(r.length()); - } finally { - r.close(); - } - } catch (IOException e) { - throw new DataStoreException("An IO Exception occurred while trying to set the last modified date: " + file.getAbsolutePath(), e); - } - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/GarbageCollector.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/GarbageCollector.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/GarbageCollector.java (working copy) @@ -1,510 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import org.apache.jackrabbit.api.management.DataStoreGarbageCollector; -import org.apache.jackrabbit.api.management.MarkEventListener; -import org.apache.jackrabbit.core.RepositoryContext; -import org.apache.jackrabbit.core.SessionImpl; -import org.apache.jackrabbit.core.id.NodeId; -import org.apache.jackrabbit.core.id.PropertyId; -import org.apache.jackrabbit.core.observation.SynchronousEventListener; -import org.apache.jackrabbit.core.persistence.IterablePersistenceManager; -import org.apache.jackrabbit.core.persistence.util.NodeInfo; -import org.apache.jackrabbit.core.state.ItemStateException; -import org.apache.jackrabbit.core.state.NoSuchItemStateException; -import org.apache.jackrabbit.core.state.NodeState; -import org.apache.jackrabbit.core.state.PropertyState; -import org.apache.jackrabbit.core.value.InternalValue; -import org.apache.jackrabbit.spi.Name; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.jcr.InvalidItemStateException; -import javax.jcr.Item; -import javax.jcr.Node; -import javax.jcr.NodeIterator; -import javax.jcr.PathNotFoundException; -import javax.jcr.Property; -import javax.jcr.PropertyIterator; -import javax.jcr.PropertyType; -import javax.jcr.RepositoryException; -import javax.jcr.Session; -import javax.jcr.UnsupportedRepositoryOperationException; -import javax.jcr.Workspace; -import 
javax.jcr.observation.Event; -import javax.jcr.observation.EventIterator; -import javax.jcr.observation.ObservationManager; - -/** - * Garbage collector for DataStore. This implementation iterates through all - * nodes and reads the binary properties. To detect nodes that are moved while - * the scan runs, event listeners are started. Like the well known garbage - * collection in Java, the items that are still in use are marked. Currently - * this is achieved by updating the modified date of the entries. Newly added - * entries are detected because the modified date is changed when they are - * added. - *

- * Example code to run the data store garbage collection: - *

- * JackrabbitRepositoryFactory jf = (JackrabbitRepositoryFactory) factory;
- * RepositoryManager m = jf.getRepositoryManager((JackrabbitRepository) repository);
- * GarbageCollector gc = m.createDataStoreGarbageCollector();
- * try {
- *     gc.mark();
- *     gc.sweep();
- * } finally {
- *     gc.close();
- * }
- * 
- */ -public class GarbageCollector implements DataStoreGarbageCollector { - - /** logger instance */ - static final Logger LOG = LoggerFactory.getLogger(GarbageCollector.class); - - /** - * The number of nodes to fetch at once from the persistence manager. Defaults to 8kb - */ - private static final int NODESATONCE = Integer.getInteger("org.apache.jackrabbit.garbagecollector.nodesatonce", 1024 * 8); - - /** - * Set this System Property to true to speed up the node traversing in a binary focused repository. - * See JCR-3708 - */ - private static final boolean NODE_ID_SCAN = Boolean.getBoolean("org.apache.jackrabbit.garbagecollector.node_id.scan"); - - private MarkEventListener callback; - - private long sleepBetweenNodes; - - protected int testDelay; - - private final DataStore store; - - private long startScanTimestamp; - - private final ArrayList listeners = new ArrayList(); - - private final IterablePersistenceManager[] pmList; - - private final SessionImpl[] sessionList; - - private final AtomicBoolean closed = new AtomicBoolean(); - - private final RepositoryContext context; - - private boolean persistenceManagerScan; - - private volatile RepositoryException observationException; - - /** - * Create a new garbage collector. - * This method is usually not called by the application, it is called - * by SessionImpl.createDataStoreGarbageCollector(). 
- * - * @param context repository context - * @param dataStore the data store to be garbage-collected - * @param list the persistence managers - * @param sessionList the sessions to access the workspaces - */ - - public GarbageCollector(RepositoryContext context, - DataStore dataStore, IterablePersistenceManager[] list, - SessionImpl[] sessionList) { - this.context = context; - this.store = dataStore; - this.pmList = list; - this.persistenceManagerScan = list != null; - this.sessionList = sessionList; - } - - public void setSleepBetweenNodes(long millis) { - this.sleepBetweenNodes = millis; - } - - public long getSleepBetweenNodes() { - return sleepBetweenNodes; - } - - /** - * When testing the garbage collection, a delay is used instead of simulating concurrent access. - * - * @param testDelay the delay in milliseconds - */ - public void setTestDelay(int testDelay) { - this.testDelay = testDelay; - } - - public void setMarkEventListener(MarkEventListener callback) { - this.callback = callback; - } - - public void mark() throws RepositoryException { - if (store == null) { - throw new RepositoryException("No DataStore configured."); - } - long now = System.currentTimeMillis(); - if (startScanTimestamp == 0) { - startScanTimestamp = now; - store.updateModifiedDateOnAccess(startScanTimestamp); - } - - if (pmList == null || !persistenceManagerScan) { - for (SessionImpl s : sessionList) { - scanNodes(s); - } - } else { - try { - if (!NODE_ID_SCAN) { - scanPersistenceManagersByNodeInfos(); - } else { - scanPersistenceManagersByNodeIds(); - } - } catch (ItemStateException e) { - throw new RepositoryException(e); - } - } - } - - private void scanNodes(SessionImpl session) throws RepositoryException { - - // add a listener to get 'moved' nodes - Session clonedSession = session.createSession(session.getWorkspace().getName()); - listeners.add(new Listener(this, clonedSession)); - - // adding a link to a BLOB updates the modified date - // reading usually doesn't, but when 
scanning, it does - recurse(session.getRootNode(), sleepBetweenNodes); - } - - public void setPersistenceManagerScan(boolean allow) { - persistenceManagerScan = allow; - } - - public boolean isPersistenceManagerScan() { - return persistenceManagerScan; - } - - private void scanPersistenceManagersByNodeInfos() throws RepositoryException, ItemStateException { - int pmCount = 0; - for (IterablePersistenceManager pm : pmList) { - pmCount++; - int count = 0; - Map batch = pm.getAllNodeInfos(null, NODESATONCE); - while (!batch.isEmpty()) { - NodeId lastId = null; - for (NodeInfo info : batch.values()) { - count++; - if (count % 1000 == 0) { - LOG.debug(pm.toString() + " ("+pmCount + "/" + pmList.length + "): analyzed " + count + " nodes..."); - } - lastId = info.getId(); - if (callback != null) { - callback.beforeScanning(null); - } - if (info.hasBlobsInDataStore()) { - try { - NodeState state = pm.load(info.getId()); - Set propertyNames = state.getPropertyNames(); - for (Name name : propertyNames) { - PropertyId pid = new PropertyId(info.getId(), name); - PropertyState ps = pm.load(pid); - if (ps.getType() == PropertyType.BINARY) { - for (InternalValue v : ps.getValues()) { - // getLength will update the last modified date - // if the persistence manager scan is running - v.getLength(); - } - } - } - } catch (NoSuchItemStateException ignored) { - // the node may have been deleted in the meantime - } - } - } - batch = pm.getAllNodeInfos(lastId, NODESATONCE); - } - } - NodeInfo.clearPool(); - } - - private void scanPersistenceManagersByNodeIds() throws RepositoryException, ItemStateException { - int pmCount = 0; - for (IterablePersistenceManager pm : pmList) { - pmCount++; - List allNodeIds = pm.getAllNodeIds(null, 0); - int overAllCount = allNodeIds.size(); - int count = 0; - for (NodeId id : allNodeIds) { - count++; - if (count % 1000 == 0) { - LOG.debug(pm.toString() + " ("+pmCount + "/" + pmList.length + "): analyzed " + count + " nodes [" + overAllCount + "]..."); - 
} - if (callback != null) { - callback.beforeScanning(null); - } - try { - NodeState state = pm.load(id); - Set propertyNames = state.getPropertyNames(); - for (Name name : propertyNames) { - PropertyId pid = new PropertyId(id, name); - PropertyState ps = pm.load(pid); - if (ps.getType() == PropertyType.BINARY) { - for (InternalValue v : ps.getValues()) { - // getLength will update the last modified date - // if the persistence manager scan is running - v.getLength(); - } - } - } - } catch (NoSuchItemStateException e) { - // the node may have been deleted or moved in the meantime - // ignore it - } - } - } - } - - /** - * Reset modifiedDateOnAccess to 0 and stop the observation - * listener if any are installed. - */ - public void stopScan() throws RepositoryException { - // reset updateModifiedDateOnAccess to OL - store.updateModifiedDateOnAccess(0L); - - if (listeners.size() > 0) { - for (Listener listener : listeners) { - listener.stop(); - } - listeners.clear(); - } - checkObservationException(); - context.setGcRunning(false); - } - - public int sweep() throws RepositoryException { - if (startScanTimestamp == 0) { - throw new RepositoryException("scan must be called first"); - } - stopScan(); - return store.deleteAllOlderThan(startScanTimestamp); - } - - /** - * Get the data store if one is used. 
- * - * @return the data store, or null - */ - public DataStore getDataStore() { - return store; - } - - void recurse(final Node n, long sleep) throws RepositoryException { - if (sleep > 0) { - try { - Thread.sleep(sleep); - } catch (InterruptedException e) { - // ignore - } - } - if (callback != null) { - callback.beforeScanning(n); - } - try { - for (PropertyIterator it = n.getProperties(); it.hasNext();) { - Property p = it.nextProperty(); - try { - if (p.getType() == PropertyType.BINARY) { - if (n.hasProperty("jcr:uuid")) { - rememberNode(n.getProperty("jcr:uuid").getString()); - } else { - rememberNode(n.getPath()); - } - if (p.isMultiple()) { - checkLengths(p.getLengths()); - } else { - checkLengths(p.getLength()); - } - } - } catch (InvalidItemStateException e) { - LOG.debug("Property removed concurrently - ignoring", e); - } - } - } catch (InvalidItemStateException e) { - LOG.debug("Node removed concurrently - ignoring", e); - } - try { - for (NodeIterator it = n.getNodes(); it.hasNext();) { - recurse(it.nextNode(), sleep); - } - } catch (InvalidItemStateException e) { - LOG.debug("Node removed concurrently - ignoring", e); - } - checkObservationException(); - } - - private void rememberNode(String path) { - // Do nothing at the moment - // TODO It may be possible to delete some items early - /* - * To delete files early in the garbage collection scan, we could do - * this: - * - * A) If garbage collection was run before, see if there a file with the - * list of UUIDs ('uuids.txt'). - * - * B) If yes, and if the checksum is ok, read all those nodes first (if - * not so many). This updates the modified date of all old files that - * are still in use. Afterwards, delete all files with an older modified - * date than the last scan! Newer files, and files that are read have a - * newer modification date. - * - * C) Delete the 'uuids.txt' file (in any case). - * - * D) Iterate (recurse) through all nodes and properties like now. 
If a - * node has a binary property, store the UUID of the node in the file - * ('uuids.txt'). Also store the time when the scan started. - * - * E) Checksum and close the file. - * - * F) Like now, delete files with an older modification date than this - * scan. - * - * We can't use node path for this, UUIDs are required as nodes could be - * moved around. - * - * This mechanism requires that all data stores update the last modified - * date when calling addRecord and that record already exists. - * - */ - } - - private static void checkLengths(long... lengths) throws RepositoryException { - for (long length : lengths) { - if (length == -1) { - throw new RepositoryException("mark failed to access a property"); - } - } - } - - public void close() { - if (!closed.getAndSet(true)) { - try { - stopScan(); - } catch (RepositoryException e) { - LOG.warn("An error occured when stopping the event listener", e); - } - for (Session s : sessionList) { - s.logout(); - } - } - } - - private void checkObservationException() throws RepositoryException { - RepositoryException e = observationException; - if (e != null) { - observationException = null; - String message = "Exception while processing concurrent events"; - LOG.warn(message, e); - e = new RepositoryException(message, e); - } - } - - void onObservationException(Exception e) { - if (e instanceof RepositoryException) { - observationException = (RepositoryException) e; - } else { - observationException = new RepositoryException(e); - } - } - - /** - * Auto-close in case the application didn't call it explicitly. - */ - protected void finalize() throws Throwable { - close(); - super.finalize(); - } - - /** - * Event listener to detect moved nodes. - * A SynchronousEventListener is used to make sure this method is called before the main iteration ends. 
- */ - class Listener implements SynchronousEventListener { - - private final GarbageCollector gc; - private final Session session; - private final ObservationManager manager; - - Listener(GarbageCollector gc, Session session) - throws UnsupportedRepositoryOperationException, - RepositoryException { - this.gc = gc; - this.session = session; - Workspace ws = session.getWorkspace(); - manager = ws.getObservationManager(); - manager.addEventListener(this, Event.NODE_MOVED, "/", true, null, - null, false); - } - - void stop() throws RepositoryException { - manager.removeEventListener(this); - session.logout(); - } - - public void onEvent(EventIterator events) { - if (testDelay > 0) { - try { - Thread.sleep(testDelay); - } catch (InterruptedException e) { - // ignore - } - } - while (events.hasNext()) { - Event event = events.nextEvent(); - try { - String path = event.getPath(); - try { - Item item = session.getItem(path); - if (item.isNode()) { - Node n = (Node) item; - recurse(n, testDelay); - } - } catch (PathNotFoundException e) { - // ignore - } - } catch (Exception e) { - gc.onObservationException(e); - try { - stop(); - } catch (RepositoryException e2) { - LOG.warn("Exception removing the observation listener - ignored", e2); - } - } - } - } - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java (working copy) @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import java.io.File; -import java.io.FileDescriptor; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; - -import org.apache.commons.io.input.AutoCloseInputStream; - -/** - * This input stream delays opening the file until the first byte is read, and - * closes and discards the underlying stream as soon as the end of input has - * been reached or when the stream is explicitly closed. - */ -public class LazyFileInputStream extends AutoCloseInputStream { - - /** - * The file descriptor to use. - */ - protected final FileDescriptor fd; - - /** - * The file to read from. - */ - protected final File file; - - /** - * True if the input stream was opened. It is also set to true if the stream - * was closed without reading (to avoid opening the file after the stream - * was closed). - */ - protected boolean opened; - - /** - * Creates a new LazyFileInputStream for the given file. If the - * file is unreadable, a FileNotFoundException is thrown. - * The file is not opened until the first byte is read from the stream. 
- * - * @param file the file - * @throws java.io.FileNotFoundException - */ - public LazyFileInputStream(File file) - throws FileNotFoundException { - super(null); - if (!file.canRead()) { - throw new FileNotFoundException(file.getPath()); - } - this.file = file; - this.fd = null; - } - - /** - * Creates a new LazyFileInputStream for the given file - * descriptor. - * The file is not opened until the first byte is read from the stream. - * - * @param fd - */ - public LazyFileInputStream(FileDescriptor fd) { - super(null); - this.file = null; - this.fd = fd; - } - - /** - * Creates a new LazyFileInputStream for the given file. If the - * file is unreadable, a FileNotFoundException is thrown. - * - * @param name - * @throws java.io.FileNotFoundException - */ - public LazyFileInputStream(String name) throws FileNotFoundException { - this(new File(name)); - } - - /** - * Open the stream if required. - * - * @throws java.io.IOException - */ - protected void open() throws IOException { - if (!opened) { - opened = true; - if (fd != null) { - in = new FileInputStream(fd); - } else { - in = new FileInputStream(file); - } - } - } - - public int read() throws IOException { - open(); - return super.read(); - } - - public int available() throws IOException { - open(); - return super.available(); - } - - public void close() throws IOException { - // make sure the file is not opened afterwards - opened = true; - - // only close the file if it was in fact opened - if (in != null) { - super.close(); - } - } - - public synchronized void reset() throws IOException { - open(); - super.reset(); - } - - public boolean markSupported() { - try { - open(); - } catch (IOException e) { - throw new IllegalStateException(e.toString()); - } - return super.markSupported(); - } - - public synchronized void mark(int readlimit) { - try { - open(); - } catch (IOException e) { - throw new IllegalStateException(e.toString()); - } - super.mark(readlimit); - } - - public long skip(long n) throws 
IOException { - open(); - return super.skip(n); - } - - public int read(byte[] b) throws IOException { - open(); - return super.read(b, 0, b.length); - } - - public int read(byte[] b, int off, int len) throws IOException { - open(); - return super.read(b, off, len); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java (working copy) @@ -1,722 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.data; - -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Iterator; -import java.util.concurrent.locks.ReentrantLock; - -import javax.jcr.RepositoryException; - -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.fs.FileSystem; -import org.apache.jackrabbit.core.fs.FileSystemException; -import org.apache.jackrabbit.core.fs.FileSystemResource; -import org.apache.jackrabbit.core.fs.local.LocalFileSystem; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A MultiDataStore can handle two independent DataStores. - *

- * Attention: You will lost the global single instance mechanism ! - *

- * It can be used if you have two storage systems. One for fast access and a - * other one like a archive DataStore on a slower storage system. All Files will - * be added to the primary DataStore. On read operations first the primary - * dataStore will be used and if no Record is found the archive DataStore will - * be used. The GarabageCollector will only remove files from the archive - * DataStore. - *

- * The internal MoveDataTask will be started automatically and could be - * configured with the following properties. - *

- * The Configuration: - * - *

- * <DataStore class="org.apache.jackrabbit.core.data.MultiDataStore">
- *     <param name="{@link #setMaxAge(int) maxAge}" value="60"/>
- *     <param name="{@link #setMoveDataTaskSleep(int) moveDataTaskSleep}" value="604800"/>
- *     <param name="{@link #setMoveDataTaskFirstRunHourOfDay(int) moveDataTaskFirstRunHourOfDay}" value="1"/>
- *     <param name="{@link #setSleepBetweenRecords(long) sleepBetweenRecords}" value="100"/>
- *     <param name="{@link #setDelayedDelete(boolean) delayedDelete}" value="false"/>
- *     <param name="{@link #setDelayedDeleteSleep(long) delayedDeleteSleep}" value="86400"/>
- *     <param name="primary" value="org.apache.jackrabbit.core.data.db.DbDataStore">
- *        <param .../>
- *     </param>
- *     <param name="archive" value="org.apache.jackrabbit.core.data.FileDataStore">
- *        <param .../>
- *     </param>
- * </DataStore>
- * 
- * - *
    - *
  • maxAge: defines how many days the content will reside in the - * primary data store. DataRecords that have been added before this time span - * will be moved to the archive data store. (default = 60)
  • - *
  • moveDataTaskSleep: specifies the sleep time of the - * moveDataTaskThread in seconds. (default = 60 * 60 * 24 * 7, which equals 7 - * days)
  • - *
  • moveDataTaskNextRunHourOfDay: specifies the hour at which - * the moveDataTaskThread initiates its first run (default = 1 - * which means 01:00 at night)
  • - *
  • sleepBetweenRecords: specifies the delay in milliseconds - * between scanning data records (default = 100)
  • - *
  • delayedDelete: its possible to delay the delete operation on - * the primary data store. The DataIdentifiers will be written to a temporary - * file. The file will be processed after a defined sleep (see - * delayedDeleteSleep) It's useful if you like to create a snapshot - * of the primary data store backend in the meantime before the data will be - * deleted. (default = false)
  • - *
  • delayedDeleteSleep: specifies the sleep time of the - * delayedDeleteTaskThread in seconds. (default = 60 * 60 * 24, which equals 1 - * day). This means the delayed delete from the primary data store will be - * processed after one day.
  • - *
- */ -public class MultiDataStore implements DataStore { - - /** - * Logger instance - */ - private static Logger log = LoggerFactory.getLogger(MultiDataStore.class); - - private DataStore primaryDataStore; - private DataStore archiveDataStore; - - /** - * Max Age in days. - */ - private int maxAge = 60; - - /** - * ReentrantLock that is used while the MoveDataTask is running. - */ - private ReentrantLock moveDataTaskLock = new ReentrantLock(); - private boolean moveDataTaskRunning = false; - private Thread moveDataTaskThread; - - /** - * The sleep time in seconds of the MoveDataTask, 7 day default. - */ - private int moveDataTaskSleep = 60 * 60 * 24 * 7; - - /** - * Indicates when the next run of the move task is scheduled. The first run - * is scheduled by default at 01:00 hours. - */ - private Calendar moveDataTaskNextRun = Calendar.getInstance(); - - /** - * Its possible to delay the delete operation on the primary data store - * while move task is running. The delete will be executed after defined - * delayDeleteSleep. - */ - private boolean delayedDelete = false; - - /** - * The sleep time in seconds to delay remove operation on the primary data - * store, 1 day default. - */ - private long delayedDeleteSleep = 60 * 60 * 24; - - /** - * File that holds the data identifiers if delayDelete is enabled. - */ - private FileSystemResource identifiersToDeleteFile = null; - - private Thread deleteDelayedIdentifiersTaskThread; - - /** - * Name of the file which holds the identifiers if deleayed delete is - * enabled - */ - private final String IDENTIFIERS_TO_DELETE_FILE_KEY = "identifiersToDelete"; - - /** - * The delay time in milliseconds between scanning data records, 100 - * default. 
- */ - private long sleepBetweenRecords = 100; - - { - if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= 1) { - moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); - } - moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, 1); - moveDataTaskNextRun.set(Calendar.MINUTE, 0); - moveDataTaskNextRun.set(Calendar.SECOND, 0); - moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); - } - - /** - * Setter for the primary dataStore - * - * @param dataStore - */ - public void setPrimaryDataStore(DataStore dataStore) { - this.primaryDataStore = dataStore; - } - - /** - * Setter for the archive dataStore - * - * @param dataStore - */ - public void setArchiveDataStore(DataStore dataStore) { - this.archiveDataStore = dataStore; - } - - /** - * Check if a record for the given identifier exists in the primary data - * store. If not found there it will be returned from the archive data - * store. If no record exists, this method returns null. - * - * @param identifier - * data identifier - * @return the record if found, and null if not - */ - public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { - if (moveDataTaskRunning) { - moveDataTaskLock.lock(); - } - try { - DataRecord dataRecord = primaryDataStore.getRecordIfStored(identifier); - if (dataRecord == null) { - dataRecord = archiveDataStore.getRecordIfStored(identifier); - } - return dataRecord; - } finally { - if (moveDataTaskRunning) { - moveDataTaskLock.unlock(); - } - } - } - - /** - * Returns the identified data record from the primary data store. If not - * found there it will be returned from the archive data store. The given - * identifier should be the identifier of a previously saved data record. - * Since records are never removed, there should never be cases where the - * identified record is not found. Abnormal cases like that are treated as - * errors and handled by throwing an exception. 
- * - * @param identifier - * data identifier - * @return identified data record - * @throws DataStoreException - * if the data store could not be accessed, or if the given - * identifier is invalid - */ - public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException { - if (moveDataTaskRunning) { - moveDataTaskLock.lock(); - } - try { - return primaryDataStore.getRecord(identifier); - } catch (DataStoreException e) { - return archiveDataStore.getRecord(identifier); - } finally { - if (moveDataTaskRunning) { - moveDataTaskLock.unlock(); - } - } - } - - /** - * Creates a new data record in the primary data store. The given binary - * stream is consumed and a binary record containing the consumed stream is - * created and returned. If the same stream already exists in another - * record, then that record is returned instead of creating a new one. - *

- * The given stream is consumed and not closed by this - * method. It is the responsibility of the caller to close the stream. A - * typical call pattern would be: - * - *

-     *     InputStream stream = ...;
-     *     try {
-     *         record = store.addRecord(stream);
-     *     } finally {
-     *         stream.close();
-     *     }
-     * 
- * - * @param stream - * binary stream - * @return data record that contains the given stream - * @throws DataStoreException - * if the data store could not be accessed - */ - public DataRecord addRecord(InputStream stream) throws DataStoreException { - return primaryDataStore.addRecord(stream); - } - - /** - * From now on, update the modified date of an object even when accessing it - * in the archive data store. Usually, the modified date is only updated - * when creating a new object, or when a new link is added to an existing - * object. When this setting is enabled, even getLength() will update the - * modified date. - * - * @param before - * - update the modified date to the current time if it is older - * than this value - */ - public void updateModifiedDateOnAccess(long before) { - archiveDataStore.updateModifiedDateOnAccess(before); - } - - /** - * Delete objects that have a modified date older than the specified date - * from the archive data store. - * - * @param min - * the minimum time - * @return the number of data records deleted - * @throws DataStoreException - */ - public int deleteAllOlderThan(long min) throws DataStoreException { - return archiveDataStore.deleteAllOlderThan(min); - } - - /** - * Get all identifiers from the archive data store. 
- * - * @return an iterator over all DataIdentifier objects - * @throws DataStoreException - * if the list could not be read - */ - public Iterator getAllIdentifiers() throws DataStoreException { - return archiveDataStore.getAllIdentifiers(); - } - - public DataRecord getRecordFromReference(String reference) - throws DataStoreException { - DataRecord record = primaryDataStore.getRecordFromReference(reference); - if (record == null) { - record = archiveDataStore.getRecordFromReference(reference); - } - return record; - } - - /** - * {@inheritDoc} - */ - public void init(String homeDir) throws RepositoryException { - if (delayedDelete) { - // First initialize the identifiersToDeleteFile - LocalFileSystem fileSystem = new LocalFileSystem(); - fileSystem.setRoot(new File(homeDir)); - identifiersToDeleteFile = new FileSystemResource(fileSystem, FileSystem.SEPARATOR - + IDENTIFIERS_TO_DELETE_FILE_KEY); - } - moveDataTaskThread = new Thread(new MoveDataTask(), - "Jackrabbit-MulitDataStore-MoveDataTaskThread"); - moveDataTaskThread.setDaemon(true); - moveDataTaskThread.start(); - log.info("MultiDataStore-MoveDataTask thread started; first run scheduled at " - + moveDataTaskNextRun.getTime()); - if (delayedDelete) { - try { - // Run on startup the DeleteDelayedIdentifiersTask only if the - // file exists and modify date is older than the - // delayedDeleteSleep timeout ... - if (identifiersToDeleteFile != null - && identifiersToDeleteFile.exists() - && (identifiersToDeleteFile.lastModified() + (delayedDeleteSleep * 1000)) < System - .currentTimeMillis()) { - deleteDelayedIdentifiersTaskThread = new Thread( - //Start immediately ... - new DeleteDelayedIdentifiersTask(0L), - "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); - deleteDelayedIdentifiersTaskThread.setDaemon(true); - deleteDelayedIdentifiersTaskThread.start(); - log.info("Old entries in the " + IDENTIFIERS_TO_DELETE_FILE_KEY - + " File found. 
DeleteDelayedIdentifiersTask-Thread started now."); - } - } catch (FileSystemException e) { - throw new RepositoryException("I/O error while reading from '" - + identifiersToDeleteFile.getPath() + "'", e); - } - } - } - - /** - * Get the minimum size of an object that should be stored in the primary - * data store. - * - * @return the minimum size in bytes - */ - public int getMinRecordLength() { - return primaryDataStore.getMinRecordLength(); - } - - /** - * {@inheritDoc} - */ - public void close() throws DataStoreException { - DataStoreException lastException = null; - // 1. close the primary data store - try { - primaryDataStore.close(); - } catch (DataStoreException e) { - lastException = e; - } - // 2. close the archive data store - try { - archiveDataStore.close(); - } catch (DataStoreException e) { - if (lastException != null) { - lastException = new DataStoreException(lastException); - } - } - // 3. if moveDataTaskThread is running interrupt it - try { - if (moveDataTaskRunning) { - moveDataTaskThread.interrupt(); - } - } catch (Exception e) { - if (lastException != null) { - lastException = new DataStoreException(lastException); - } - } - // 4. 
if deleteDelayedIdentifiersTaskThread is running interrupt it - try { - if (deleteDelayedIdentifiersTaskThread != null - && deleteDelayedIdentifiersTaskThread.isAlive()) { - deleteDelayedIdentifiersTaskThread.interrupt(); - } - } catch (Exception e) { - if (lastException != null) { - lastException = new DataStoreException(lastException); - } - } - if (lastException != null) { - throw lastException; - } - } - - /** - * {@inheritDoc} - */ - public void clearInUse() { - archiveDataStore.clearInUse(); - } - - public int getMaxAge() { - return maxAge; - } - - public void setMaxAge(int maxAge) { - this.maxAge = maxAge; - } - - public int getMoveDataTaskSleep() { - return moveDataTaskSleep; - } - - public int getMoveDataTaskFirstRunHourOfDay() { - return moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY); - } - - public void setMoveDataTaskSleep(int sleep) { - this.moveDataTaskSleep = sleep; - } - - public void setMoveDataTaskFirstRunHourOfDay(int hourOfDay) { - moveDataTaskNextRun = Calendar.getInstance(); - if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) { - moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); - } - moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay); - moveDataTaskNextRun.set(Calendar.MINUTE, 0); - moveDataTaskNextRun.set(Calendar.SECOND, 0); - moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); - } - - public void setSleepBetweenRecords(long millis) { - this.sleepBetweenRecords = millis; - } - - public long getSleepBetweenRecords() { - return sleepBetweenRecords; - } - - public boolean isDelayedDelete() { - return delayedDelete; - } - - public void setDelayedDelete(boolean delayedDelete) { - this.delayedDelete = delayedDelete; - } - - public long getDelayedDeleteSleep() { - return delayedDeleteSleep; - } - - public void setDelayedDeleteSleep(long delayedDeleteSleep) { - this.delayedDeleteSleep = delayedDeleteSleep; - } - - /** - * Writes the given DataIdentifier to the delayedDeletedFile. 
- * - * @param identifier - * @return boolean true if it was successful otherwise false - */ - private boolean writeDelayedDataIdentifier(DataIdentifier identifier) { - BufferedWriter writer = null; - try { - File identifierFile = new File( - ((LocalFileSystem) identifiersToDeleteFile.getFileSystem()).getPath(), - identifiersToDeleteFile.getPath()); - writer = new BufferedWriter(new FileWriter(identifierFile, true)); - writer.write(identifier.toString()); - return true; - } catch (Exception e) { - log.warn("I/O error while saving DataIdentifier (stacktrace on DEBUG log level) to '" - + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); - log.debug("Root cause: ", e); - return false; - } finally { - IOUtils.closeQuietly(writer); - } - } - - /** - * Purges the delayedDeletedFile. - * - * @return boolean true if it was successful otherwise false - */ - private boolean purgeDelayedDeleteFile() { - BufferedWriter writer = null; - try { - writer = new BufferedWriter(new OutputStreamWriter( - identifiersToDeleteFile.getOutputStream())); - writer.write(""); - return true; - } catch (Exception e) { - log.warn("I/O error while purging (stacktrace on DEBUG log level) the " - + IDENTIFIERS_TO_DELETE_FILE_KEY + " file '" - + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); - log.debug("Root cause: ", e); - return false; - } finally { - IOUtils.closeQuietly(writer); - } - } - - /** - * Class for maintaining the MultiDataStore. It will be used to move the - * content of the primary data store to the archive data store. 
- */ - public class MoveDataTask implements Runnable { - - /** - * {@inheritDoc} - */ - public void run() { - while (!Thread.currentThread().isInterrupted()) { - try { - log.info("Next move-data task run scheduled at " - + moveDataTaskNextRun.getTime()); - long sleepTime = moveDataTaskNextRun.getTimeInMillis() - - System.currentTimeMillis(); - if (sleepTime > 0) { - Thread.sleep(sleepTime); - } - moveDataTaskRunning = true; - moveOutdatedData(); - moveDataTaskRunning = false; - moveDataTaskNextRun.add(Calendar.SECOND, moveDataTaskSleep); - if (delayedDelete) { - if (deleteDelayedIdentifiersTaskThread != null - && deleteDelayedIdentifiersTaskThread.isAlive()) { - log.warn("The DeleteDelayedIdentifiersTask-Thread is already running."); - } else { - deleteDelayedIdentifiersTaskThread = new Thread( - new DeleteDelayedIdentifiersTask(delayedDeleteSleep), - "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); - deleteDelayedIdentifiersTaskThread.setDaemon(true); - deleteDelayedIdentifiersTaskThread.start(); - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - log.warn("Interrupted: stopping move-data task."); - } - - /** - * Moves outdated data from primary to archive data store - */ - protected void moveOutdatedData() { - try { - long now = System.currentTimeMillis(); - long maxAgeMilli = 1000L * 60 * 60 * 24 * maxAge; - log.debug("Collecting all Identifiers from PrimaryDataStore..."); - Iterator allIdentifiers = primaryDataStore.getAllIdentifiers(); - int moved = 0; - while (allIdentifiers.hasNext()) { - DataIdentifier identifier = allIdentifiers.next(); - DataRecord dataRecord = primaryDataStore.getRecord(identifier); - if ((dataRecord.getLastModified() + maxAgeMilli) < now) { - try { - moveDataTaskLock.lock(); - if (delayedDelete) { - // first write it to the file and then add it to - // the archive data store ... 
- if (writeDelayedDataIdentifier(identifier)) { - archiveDataStore.addRecord(dataRecord.getStream()); - moved++; - } - } else { - // first add it and then delete it .. not really - // atomic ... - archiveDataStore.addRecord(dataRecord.getStream()); - ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); - moved++; - } - if (moved % 100 == 0) { - log.debug("Moving DataRecord's... ({})", moved); - } - } catch (DataStoreException e) { - log.error("Failed to move DataRecord. DataIdentifier: " + identifier, e); - } finally { - moveDataTaskLock.unlock(); - } - } - // Give other threads time to use the MultiDataStore while - // MoveDataTask is running.. - Thread.sleep(sleepBetweenRecords); - } - if (delayedDelete) { - log.info("Moved " - + moved - + " DataRecords to the archive data store. The DataRecords in the primary data store will be removed in " - + delayedDeleteSleep + " seconds."); - } else { - log.info("Moved " + moved + " DataRecords to the archive data store."); - } - } catch (Exception e) { - log.warn("Failed to run move-data task.", e); - } - } - } - - /** - * Class to clean up the delayed DataRecords from the primary data store. - */ - public class DeleteDelayedIdentifiersTask implements Runnable { - - boolean run = true; - private long sleepTime = 0L; - - /** - * Constructor - * @param sleep how long this DeleteDelayedIdentifiersTask should sleep in seconds. 
- */ - public DeleteDelayedIdentifiersTask(long sleep) { - this.sleepTime = (sleep * 1000L); - } - - @Override - public void run() { - if (moveDataTaskRunning) { - log.warn("It's not supported to run the DeleteDelayedIdentifiersTask while the MoveDataTask is running."); - return; - } - while (run && !Thread.currentThread().isInterrupted()) { - if (sleepTime > 0) { - try { - Thread.sleep(sleepTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - log.info("Start to delete DataRecords from the primary data store."); - BufferedReader reader = null; - ArrayList problemIdentifiers = new ArrayList(); - try { - int deleted = 0; - reader = new BufferedReader(new InputStreamReader( - identifiersToDeleteFile.getInputStream())); - while (true) { - String s = reader.readLine(); - if (s == null || s.equals("")) { - break; - } - DataIdentifier identifier = new DataIdentifier(s); - try { - moveDataTaskLock.lock(); - ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); - deleted++; - } catch (DataStoreException e) { - log.error("Failed to delete DataRecord. DataIdentifier: " + identifier, - e); - problemIdentifiers.add(identifier); - } finally { - moveDataTaskLock.unlock(); - } - // Give other threads time to use the MultiDataStore - // while - // DeleteDelayedIdentifiersTask is running.. 
- Thread.sleep(sleepBetweenRecords); - } - log.info("Deleted " + deleted + " DataRecords from the primary data store."); - if (problemIdentifiers.isEmpty()) { - try { - identifiersToDeleteFile.delete(); - } catch (FileSystemException e) { - log.warn("Unable to delete the " + IDENTIFIERS_TO_DELETE_FILE_KEY - + " File."); - if (!purgeDelayedDeleteFile()) { - log.error("Unable to purge the " + IDENTIFIERS_TO_DELETE_FILE_KEY - + " File."); - } - } - } else { - if (purgeDelayedDeleteFile()) { - for (int x = 0; x < problemIdentifiers.size(); x++) { - writeDelayedDataIdentifier(problemIdentifiers.get(x)); - } - } - } - } catch (InterruptedException e) { - log.warn("Interrupted: stopping delayed-delete task."); - Thread.currentThread().interrupt(); - } catch (Exception e) { - log.warn("Failed to run delayed-delete task.", e); - } finally { - IOUtils.closeQuietly(reader); - run = false; - } - } - } - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java (working copy) @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import org.apache.jackrabbit.core.data.MultiDataStore.MoveDataTask; - -/** - * To use a DataStore within a MultiDataStore it must implement this - * MultiDataStoreAware Interface. It extends a DataStore to delete a single - * DataRecord. - */ -public interface MultiDataStoreAware { - - /** - * Deletes a single DataRecord based on the given identifier. Delete will - * only be used by the {@link MoveDataTask}. - * - * @param identifier - * data identifier - * @throws DataStoreException - * if the data store could not be accessed, or if the given - * identifier is invalid - */ - void deleteRecord(DataIdentifier identifier) throws DataStoreException; - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java (working copy) @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.data; - -import org.apache.jackrabbit.api.management.MarkEventListener; - -/** - * The listener interface for receiving garbage collection scan events. - */ -public interface ScanEventListener extends MarkEventListener { - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java (working copy) @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.fs; - -import java.io.InputStream; -import java.io.OutputStream; - -/** - * A BasedFileSystem represents a 'file system in a file system'. - */ -public class BasedFileSystem implements FileSystem { - - protected final FileSystem fsBase; - - protected final String basePath; - - /** - * Creates a new BasedFileSystem - * - * @param fsBase the FileSystem the new file system should be based on - * @param relRootPath the root path relative to fsBase's root - */ - public BasedFileSystem(FileSystem fsBase, String relRootPath) { - if (fsBase == null) { - throw new IllegalArgumentException("invalid file system argument"); - } - this.fsBase = fsBase; - - if (relRootPath == null) { - throw new IllegalArgumentException("invalid null path argument"); - } - if (relRootPath.equals(SEPARATOR)) { - throw new IllegalArgumentException("invalid path argument"); - } - if (!relRootPath.startsWith(SEPARATOR)) { - relRootPath = SEPARATOR + relRootPath; - } - if (relRootPath.endsWith(SEPARATOR)) { - relRootPath = relRootPath.substring(0, relRootPath.length() - 1); - - } - this.basePath = relRootPath; - } - - protected String buildBasePath(String path) { - if (path.startsWith(SEPARATOR)) { - if (path.length() == 1) { - return basePath; - } else { - return basePath + path; - } - } else { - return basePath + SEPARATOR + path; - } - } - - //-----------------------------------------------------------< FileSystem > - /** - * {@inheritDoc} - */ - public void init() throws FileSystemException { - // check base path - if (!fsBase.isFolder(basePath)) { - fsBase.createFolder(basePath); - } - } - - /** - * {@inheritDoc} - */ - public void close() throws FileSystemException { - // do nothing; base file system should be closed explicitly - } - - /** - * {@inheritDoc} - */ - public void createFolder(String folderPath) throws FileSystemException { - fsBase.createFolder(buildBasePath(folderPath)); - } - - /** - * {@inheritDoc} - */ - public void deleteFile(String 
filePath) throws FileSystemException { - fsBase.deleteFile(buildBasePath(filePath)); - } - - /** - * {@inheritDoc} - */ - public void deleteFolder(String folderPath) throws FileSystemException { - fsBase.deleteFolder(buildBasePath(folderPath)); - } - - /** - * {@inheritDoc} - */ - public boolean exists(String path) throws FileSystemException { - return fsBase.exists(buildBasePath(path)); - } - - /** - * {@inheritDoc} - */ - public InputStream getInputStream(String filePath) throws FileSystemException { - return fsBase.getInputStream(buildBasePath(filePath)); - } - - /** - * {@inheritDoc} - */ - public OutputStream getOutputStream(String filePath) throws FileSystemException { - return fsBase.getOutputStream(buildBasePath(filePath)); - } - - /** - * {@inheritDoc} - */ - public boolean hasChildren(String path) throws FileSystemException { - return fsBase.hasChildren(buildBasePath(path)); - } - - /** - * {@inheritDoc} - */ - public boolean isFile(String path) throws FileSystemException { - return fsBase.isFile(buildBasePath(path)); - } - - /** - * {@inheritDoc} - */ - public boolean isFolder(String path) throws FileSystemException { - return fsBase.isFolder(buildBasePath(path)); - } - - /** - * {@inheritDoc} - */ - public long lastModified(String path) throws FileSystemException { - return fsBase.lastModified(buildBasePath(path)); - } - - /** - * {@inheritDoc} - */ - public long length(String filePath) throws FileSystemException { - return fsBase.length(buildBasePath(filePath)); - } - - /** - * {@inheritDoc} - */ - public String[] list(String folderPath) throws FileSystemException { - return fsBase.list(buildBasePath(folderPath)); - } - - /** - * {@inheritDoc} - */ - public String[] listFiles(String folderPath) throws FileSystemException { - return fsBase.listFiles(buildBasePath(folderPath)); - } - - /** - * {@inheritDoc} - */ - public String[] listFolders(String folderPath) throws FileSystemException { - return fsBase.listFolders(buildBasePath(folderPath)); - } -} 
Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java (working copy) @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -import java.io.InputStream; -import java.io.OutputStream; - -/** - * The FileSystem interface is an abstraction of a virtual - * file system. The similarities of its method names with with the methods - * of the java.io.File class are intentional. - *
- * Implementations of this interface expose a file system-like resource. - * File system-like resources include WebDAV-enabled servers, local file systems, - * and so forth. - */ -public interface FileSystem { - - /** - * File separator - */ - String SEPARATOR = "/"; - - /** - * File separator character - */ - char SEPARATOR_CHAR = '/'; - - /** - * Initialize the file system - * - * @throws FileSystemException if the file system initialization fails - */ - void init() throws FileSystemException; - - /** - * Close the file system. After calling this method, the file system is no - * longer accessible. - * - * @throws FileSystemException - */ - void close() throws FileSystemException; - - /** - * Returns an input stream of the contents of the file denoted by this path. - * - * @param filePath the path of the file. - * @return an input stream of the contents of the file. - * @throws FileSystemException if the file does not exist - * or if it cannot be read from - */ - InputStream getInputStream(String filePath) throws FileSystemException; - - /** - * Returns an output stream for writing bytes to the file denoted by this path. - * The file will be created if it doesn't exist. If the file exists, its contents - * will be overwritten. - * - * @param filePath the path of the file. - * @return an output stream for writing bytes to the file. - * @throws FileSystemException if the file cannot be written to or created - */ - OutputStream getOutputStream(String filePath) throws FileSystemException; - - /** - * Creates the folder named by this path, including any necessary but - * nonexistent parent folders. Note that if this operation fails it - * may have succeeded in creating some of the necessary parent folders. - * - * @param folderPath the path of the folder to be created. - * @throws FileSystemException if a file system entry denoted by path - * already exists or if another error occurs. 
- */ - void createFolder(String folderPath) throws FileSystemException; - - /** - * Tests whether the file system entry denoted by this path exists. - * - * @param path the path of a file system entry. - * @return true if the file system entry at path exists; false otherwise. - * @throws FileSystemException - */ - boolean exists(String path) throws FileSystemException; - - /** - * Tests whether the file system entry denoted by this path exists and - * is a file. - * - * @param path the path of a file system entry. - * @return true if the file system entry at path is a file; false otherwise. - * @throws FileSystemException - */ - boolean isFile(String path) throws FileSystemException; - - /** - * Tests whether the file system entry denoted by this path exists and - * is a folder. - * - * @param path the path of a file system entry. - * @return true if the file system entry at path is a folder; false otherwise. - * @throws FileSystemException - */ - boolean isFolder(String path) throws FileSystemException; - - /** - * Tests whether the file system entry denoted by this path has child entries. - * - * @param path the path of a file system entry. - * @return true if the file system entry at path has child entries; false otherwise. - * @throws FileSystemException - */ - boolean hasChildren(String path) throws FileSystemException; - - /** - * Returns the length of the file denoted by this path. - * - * @param filePath the path of the file. - * @return The length, in bytes, of the file denoted by this path, - * or -1L if the length can't be determined. - * @throws FileSystemException if the path does not denote an existing file. - */ - long length(String filePath) throws FileSystemException; - - /** - * Returns the time that the file system entry denoted by this path - * was last modified. - * - * @param path the path of a file system entry. 
- * @return A long value representing the time the file system entry was - * last modified, measured in milliseconds since the epoch - * (00:00:00 GMT, January 1, 1970), or 0L if the modification - * time can't be determined. - * @throws FileSystemException if the file system entry does not exist. - */ - long lastModified(String path) throws FileSystemException; - - /** - * Returns an array of strings naming the files and folders - * in the folder denoted by this path. - * - * @param folderPath the path of the folder whose contents is to be listed. - * @return an array of strings naming the files and folders - * in the folder denoted by this path. - * @throws FileSystemException if this path does not denote a folder or if - * another error occurs. - */ - String[] list(String folderPath) throws FileSystemException; - - /** - * Returns an array of strings naming the files in the folder - * denoted by this path. - * - * @param folderPath the path of the folder whose contents is to be listed. - * @return an array of strings naming the files in the folder - * denoted by this path. - * @throws FileSystemException if this path does not denote a folder or if - * another error occurs. - */ - String[] listFiles(String folderPath) throws FileSystemException; - - /** - * Returns an array of strings naming the folders in the folder - * denoted by this path. - * - * @param folderPath the path of the folder whose contents is to be listed. - * @return an array of strings naming the folders in the folder - * denoted by this path. - * @throws FileSystemException if this path does not denote a folder or if - * another error occurs. - */ - String[] listFolders(String folderPath) throws FileSystemException; - - /** - * Deletes the file denoted by this path. - * - * @param filePath the path of the file to be deleted. - * @throws FileSystemException if this path does not denote a file or if - * another error occurs. 
- */ - void deleteFile(String filePath) throws FileSystemException; - - /** - * Deletes the folder denoted by this path. Any contents of this folder - * (folders and files) will be deleted recursively. - * - * @param folderPath the path of the folder to be deleted. - * @throws FileSystemException if this path does not denote a folder or if - * another error occurs. - */ - void deleteFolder(String folderPath) throws FileSystemException; - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java (working copy) @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -/** - * The FileSystemException signals an error within a file system - * operation. FileSystemExceptions are thrown by {@link FileSystem} - * implementations. - */ -public class FileSystemException extends Exception { - - /** - * Constructs a new instance of this class with the specified detail - * message. 
- * - * @param message the detail message. The detail message is saved for - * later retrieval by the {@link #getMessage()} method. - */ - public FileSystemException(String message) { - super(message); - } - - /** - * Constructs a new instance of this class with the specified detail - * message and root cause. - * - * @param message the detail message. The detail message is saved for - * later retrieval by the {@link #getMessage()} method. - * @param rootCause root failure cause - */ - public FileSystemException(String message, Throwable rootCause) { - super(message, rootCause); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java (working copy) @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -import javax.jcr.RepositoryException; - -/** - * Factory interface for creating {@link FileSystem} instances. 
Used - * to decouple the repository internals from the repository configuration - * mechanism. - */ -public interface FileSystemFactory { - - /** - * Creates, initializes, and returns a {@link FileSystem} instance - * for use by the repository. Note that no information is passed from - * the client, so all required configuration information must be - * encapsulated in the factory. - * - * @return initialized file system - * @throws RepositoryException if the file system can not be created - */ - FileSystem getFileSystem() throws RepositoryException; - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java (working copy) @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -import java.io.ByteArrayOutputStream; -import java.util.BitSet; - -/** - * Utility class for handling paths in a file system. 
- */ -public final class FileSystemPathUtil { - - /** - * Array of lowercase hexadecimal characters used in creating hex escapes. - */ - private static final char[] HEX_TABLE = "0123456789abcdef".toCharArray(); - - /** - * The escape character used to mark hex escape sequences. - */ - private static final char ESCAPE_CHAR = '%'; - - /** - * The list of characters that are not encoded by the escapeName(String) - * and unescape(String) methods. They contains the characters - * which can safely be used in file names: - */ - public static final BitSet SAFE_NAMECHARS; - - /** - * The list of characters that are not encoded by the escapePath(String) - * and unescape(String) methods. They contains the characters - * which can safely be used in file paths: - */ - public static final BitSet SAFE_PATHCHARS; - - static { - // build list of valid name characters - SAFE_NAMECHARS = new BitSet(256); - int i; - for (i = 'a'; i <= 'z'; i++) { - SAFE_NAMECHARS.set(i); - } - for (i = 'A'; i <= 'Z'; i++) { - SAFE_NAMECHARS.set(i); - } - for (i = '0'; i <= '9'; i++) { - SAFE_NAMECHARS.set(i); - } - SAFE_NAMECHARS.set('-'); - SAFE_NAMECHARS.set('_'); - SAFE_NAMECHARS.set('.'); - - // build list of valid path characters (includes name characters) - SAFE_PATHCHARS = (BitSet) SAFE_NAMECHARS.clone(); - SAFE_PATHCHARS.set(FileSystem.SEPARATOR_CHAR); - } - - /** - * private constructor - */ - private FileSystemPathUtil() { - } - - /** - * Escapes the given string using URL encoding for all bytes not included - * in the given set of safe characters. 
- * - * @param s the string to escape - * @param safeChars set of safe characters (bytes) - * @return escaped string - */ - private static String escape(String s, BitSet safeChars) { - byte[] bytes = s.getBytes(); - StringBuilder out = new StringBuilder(bytes.length); - for (int i = 0; i < bytes.length; i++) { - int c = bytes[i] & 0xff; - if (safeChars.get(c) && c != ESCAPE_CHAR) { - out.append((char) c); - } else { - out.append(ESCAPE_CHAR); - out.append(HEX_TABLE[(c >> 4) & 0x0f]); - out.append(HEX_TABLE[(c) & 0x0f]); - } - } - return out.toString(); - } - - /** - * Encodes the specified path. Same as - * {@link #escapeName(String)} except that the separator - * character / is regarded as a legal path character - * that needs no escaping. - * - * @param path the path to encode. - * @return the escaped path - */ - public static String escapePath(String path) { - return escape(path, SAFE_PATHCHARS); - } - - /** - * Encodes the specified name. Same as - * {@link #escapePath(String)} except that the separator character - * / is regarded as an illegal character that needs - * escaping. - * - * @param name the name to encode. - * @return the escaped name - */ - public static String escapeName(String name) { - return escape(name, SAFE_NAMECHARS); - } - - /** - * Decodes the specified path/name. 
- * - * @param pathOrName the escaped path/name - * @return the unescaped path/name - */ - public static String unescape(String pathOrName) { - ByteArrayOutputStream out = new ByteArrayOutputStream(pathOrName.length()); - for (int i = 0; i < pathOrName.length(); i++) { - char c = pathOrName.charAt(i); - if (c == ESCAPE_CHAR) { - try { - out.write(Integer.parseInt(pathOrName.substring(i + 1, i + 3), 16)); - } catch (NumberFormatException e) { - IllegalArgumentException iae = new IllegalArgumentException("Failed to unescape escape sequence"); - iae.initCause(e); - throw iae; - } - i += 2; - } else { - out.write(c); - } - } - return new String(out.toByteArray()); - } - - /** - * Tests whether the specified path represents the root path, i.e. "/". - * - * @param path path to test - * @return true if the specified path represents the root path; false otherwise. - */ - public static boolean denotesRoot(String path) { - return path.equals(FileSystem.SEPARATOR); - } - - /** - * Checks if path is a valid path. - * - * @param path the path to be checked - * @throws FileSystemException If path is not a valid path - */ - public static void checkFormat(String path) throws FileSystemException { - if (path == null) { - throw new FileSystemException("null path"); - } - - // path must be absolute, i.e. 
starting with '/' - if (!path.startsWith(FileSystem.SEPARATOR)) { - throw new FileSystemException("not an absolute path: " + path); - } - - // trailing '/' is not allowed (except for root path) - if (path.endsWith(FileSystem.SEPARATOR) && path.length() > 1) { - throw new FileSystemException("malformed path: " + path); - } - - String[] names = path.split(FileSystem.SEPARATOR); - for (int i = 1; i < names.length; i++) { - // name must not be empty - if (names[i].length() == 0) { - throw new FileSystemException("empty name: " + path); - } - // leading/trailing whitespace is not allowed - String trimmed = names[i].trim(); - if (!trimmed.equals(names[i])) { - throw new FileSystemException("illegal leading or trailing whitespace in name: " + path); - } - } - } - - /** - * Returns the parent directory of the specified path. - * - * @param path a file system path denoting a directory or a file. - * @return the parent directory. - */ - public static String getParentDir(String path) { - int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); - if (pos > 0) { - return path.substring(0, pos); - } - return FileSystem.SEPARATOR; - } - - /** - * Returns the name of the specified path. - * - * @param path a file system path denoting a directory or a file. - * @return the name. - */ - public static String getName(String path) { - int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); - if (pos != -1) { - return path.substring(pos + 1); - } - return path; - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java (working copy) @@ -1,225 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.commons.io.IOUtils; - -/** - * A FileSystemResource represents a resource (i.e. file) in a - * FileSystem. - */ -public class FileSystemResource { - - protected final FileSystem fs; - - protected final String path; - - static { - // preload FileSystemPathUtil to prevent classloader issues during shutdown - FileSystemPathUtil.class.hashCode(); - } - - /** - * Creates a new FileSystemResource - * - * @param fs the FileSystem where the resource is located - * @param path the path of the resource in the FileSystem - */ - public FileSystemResource(FileSystem fs, String path) { - if (fs == null) { - throw new IllegalArgumentException("invalid file system argument"); - } - this.fs = fs; - - if (path == null) { - throw new IllegalArgumentException("invalid path argument"); - } - this.path = path; - } - - /** - * Returns the FileSystem where this resource is located. - * - * @return the FileSystem where this resource is located. - */ - public FileSystem getFileSystem() { - return fs; - } - - /** - * Returns the path of this resource. - * - * @return the path of this resource. 
- */ - public String getPath() { - return path; - } - - /** - * Returns the parent directory of this resource. - * - * @return the parent directory. - */ - public String getParentDir() { - return FileSystemPathUtil.getParentDir(path); - } - - /** - * Returns the name of this resource. - * - * @return the name. - */ - public String getName() { - return FileSystemPathUtil.getName(path); - } - - /** - * Creates the parent directory of this resource, including any necessary - * but nonexistent parent directories. - * - * @throws FileSystemException - */ - public synchronized void makeParentDirs() throws FileSystemException { - String parentDir = getParentDir(); - if (!fs.exists(parentDir)) { - fs.createFolder(parentDir); - } - } - - /** - * Deletes this resource. - * Same as {@link #delete(false)}. - * - * @see FileSystem#deleteFile - */ - public void delete() throws FileSystemException { - delete(false); - } - - /** - * Deletes this resource. - * - * @param pruneEmptyParentDirs if true, empty parent folders will - * automatically be deleted - * @see FileSystem#deleteFile - */ - public synchronized void delete(boolean pruneEmptyParentDirs) throws FileSystemException { - fs.deleteFile(path); - if (pruneEmptyParentDirs) { - // prune empty parent folders - String parentDir = FileSystemPathUtil.getParentDir(path); - while (!parentDir.equals(FileSystem.SEPARATOR) - && fs.exists(parentDir) - && !fs.hasChildren(parentDir)) { - fs.deleteFolder(parentDir); - parentDir = FileSystemPathUtil.getParentDir(parentDir); - } - } - } - - /** - * @see FileSystem#exists - */ - public boolean exists() throws FileSystemException { - return fs.exists(path); - } - - /** - * @see FileSystem#getInputStream - */ - public InputStream getInputStream() throws FileSystemException { - return fs.getInputStream(path); - } - - /** - * Spools this resource to the given output stream. 
- * - * @param out output stream where to spool the resource - * @throws FileSystemException if the input stream for this resource could - * not be obtained - * @throws IOException if an error occurs while while spooling - * @see FileSystem#getInputStream - */ - public void spool(OutputStream out) throws FileSystemException, IOException { - InputStream in = fs.getInputStream(path); - try { - IOUtils.copy(in, out); - } finally { - IOUtils.closeQuietly(in); - } - } - - /** - * @see FileSystem#getOutputStream - */ - public OutputStream getOutputStream() throws FileSystemException { - return fs.getOutputStream(path); - } - - /** - * @see FileSystem#lastModified - */ - public long lastModified() throws FileSystemException { - return fs.lastModified(path); - } - - /** - * @see FileSystem#length - */ - public long length() throws FileSystemException { - return fs.length(path); - } - - //-------------------------------------------< java.lang.Object overrides > - /** - * Returns the path string of this resource. This is just the - * string returned by the {@link #getPath} method. - * - * @return The path string of this resource - */ - public String toString() { - return getPath(); - } - - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj instanceof FileSystemResource) { - FileSystemResource other = (FileSystemResource) obj; - return (path == null ? other.path == null : path.equals(other.path)) - && (fs == null ? other.fs == null : fs.equals(other.fs)); - } - return false; - } - - /** - * Returns zero to satisfy the Object equals/hashCode contract. - * This class is mutable and not meant to be used as a hash key. 
- * - * @return always zero - * @see Object#hashCode() - */ - public int hashCode() { - return 0; - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java (working copy) @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs.local; - -import java.io.File; -import java.io.IOException; - -import org.apache.commons.io.FileUtils; - -/** - * Static utility methods for recursively copying and deleting files and - * directories. - */ -public final class FileUtil { - - /** - * private constructor - */ - private FileUtil() { - } - - /** - * Recursively copies the given file or directory to the - * given destination. 
- * - * @param src source file or directory - * @param dest destination file or directory - * @throws IOException if the file or directory cannot be copied - */ - public static void copy(File src, File dest) throws IOException { - if (!src.canRead()) { - throw new IOException(src.getPath() + " can't be read from."); - } - if (src.isDirectory()) { - // src is a folder - if (dest.isFile()) { - throw new IOException("can't copy a folder to a file"); - } - if (!dest.exists()) { - dest.mkdirs(); - } - if (!dest.canWrite()) { - throw new IOException("can't write to " + dest.getPath()); - } - File[] children = src.listFiles(); - for (int i = 0; i < children.length; i++) { - copy(children[i], new File(dest, children[i].getName())); - } - } else { - // src is a file - File destParent; - if (dest.isDirectory()) { - // dest is a folder - destParent = dest; - dest = new File(destParent, src.getName()); - } else { - destParent = dest.getParentFile(); - } - if (!destParent.canWrite()) { - throw new IOException("can't write to " + destParent.getPath()); - } - - FileUtils.copyFile(src, dest); - } - } - - /** - * Recursively deletes the given file or directory. 
- * - * @param f file or directory - * @throws IOException if the file or directory cannot be deleted - */ - public static void delete(File f) throws IOException { - if (f.isDirectory()) { - // it's a folder, list children first - File[] children = f.listFiles(); - for (int i = 0; i < children.length; i++) { - delete(children[i]); - } - } - if (!f.delete()) { - throw new IOException("Unable to delete " + f.getPath()); - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java (working copy) @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.fs.local; - -import org.apache.jackrabbit.util.LazyFileInputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.HashMap; -import java.util.HashSet; - -/** - * This Class implements a very simple open handle monitor for the local - * file system. This is usefull, if the list of open handles, referenced by - * an open FileInputStream() should be tracked. This can cause problems on - * windows filesystems where open files cannot be deleted. - */ -public class HandleMonitor { - - /** - * The default logger - */ - private static Logger log = LoggerFactory.getLogger(HandleMonitor.class); - - /** - * the map of open handles (key=File, value=Handle) - */ - private HashMap openHandles = new HashMap(); - - /** - * Opens a file and returns an InputStream - * - * @param file - * @return - * @throws FileNotFoundException - */ - public InputStream open(File file) throws FileNotFoundException { - Handle handle = getHandle(file); - InputStream in = handle.open(); - return in; - } - - /** - * Checks, if the file is open - * @param file - * @return - */ - public boolean isOpen(File file) { - return openHandles.containsKey(file); - } - - /** - * Closes a file - * @param file - */ - private void close(File file) { - openHandles.remove(file); - } - - /** - * Returns the handle for a file. 
- * @param file - * @return - */ - private Handle getHandle(File file) { - Handle handle = openHandles.get(file); - if (handle == null) { - handle = new Handle(file); - openHandles.put(file, handle); - } - return handle; - } - - /** - * Dumps the contents of this monitor - */ - public void dump() { - log.info("Number of open files: " + openHandles.size()); - for (File file : openHandles.keySet()) { - Handle handle = openHandles.get(file); - handle.dump(); - } - } - - /** - * Dumps the information for a file - * @param file - */ - public void dump(File file) { - Handle handle = openHandles.get(file); - if (handle != null) { - handle.dump(true); - } - } - - /** - * Class representing all open handles to a file - */ - private class Handle { - - /** - * the file of this handle - */ - private File file; - - /** - * all open streams of this handle - */ - private HashSet streams = new HashSet(); - - /** - * Creates a new handle for a file - * @param file - */ - private Handle(File file) { - this.file = file; - } - - /** - * opens a stream for this handle - * @return - * @throws FileNotFoundException - */ - private InputStream open() throws FileNotFoundException { - Handle.MonitoredInputStream in = new Handle.MonitoredInputStream(file); - streams.add(in); - return in; - } - - /** - * Closes a stream - * @param in - */ - private void close(MonitoredInputStream in) { - streams.remove(in); - if (streams.isEmpty()) { - HandleMonitor.this.close(file); - } - } - - /** - * Dumps this handle - */ - private void dump() { - dump(false); - } - - /** - * Dumps this handle - */ - private void dump(boolean detailed) { - if (detailed) { - log.info("- " + file.getPath() + ", " + streams.size()); - for (Handle.MonitoredInputStream in : streams) { - in.dump(); - } - } else { - log.info("- " + file.getPath() + ", " + streams.size()); - } - } - - /** - * Delegating input stream that registers/unregisters itself from the - * handle. 
- */ - private class MonitoredInputStream extends LazyFileInputStream { - - /** - * throwable of the time, the stream was created - */ - private final Throwable throwable = new Exception(); - - /** - * {@inheritDoc} - */ - private MonitoredInputStream(File file) throws FileNotFoundException { - super(file); - } - - /** - * dumps this stream - */ - private void dump() { - log.info("- opened by : ", throwable); - } - - /** - * {@inheritDoc} - */ - public void close() throws IOException { - // remove myself from the set - Handle.this.close(this); - super.close(); - } - - } - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java (working copy) @@ -1,386 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.fs.local; - -import org.apache.jackrabbit.core.fs.FileSystem; -import org.apache.jackrabbit.core.fs.FileSystemException; -import org.apache.jackrabbit.util.LazyFileInputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileFilter; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -/** - * A LocalFileSystem ... - */ -public class LocalFileSystem implements FileSystem { - - private static Logger log = LoggerFactory.getLogger(LocalFileSystem.class); - - private File root; - - private HandleMonitor monitor; - - /** - * Default constructor - */ - public LocalFileSystem() { - } - - public String getPath() { - if (root != null) { - return root.getPath(); - } else { - return null; - } - } - - /** - * Sets the path to the root directory of this local filesystem. please note - * that this method can be called via reflection during initialization and - * must not be altered. - * - * @param rootPath the path to the root directory - */ - public void setPath(String rootPath) { - setRoot(new File(osPath(rootPath))); - } - - public void setRoot(File root) { - this.root = root; - } - - /** - * Enables/Disables the use of the handle monitor. - * - * @param enable - */ - public void setEnableHandleMonitor(String enable) { - setEnableHandleMonitor(Boolean.valueOf(enable).booleanValue()); - } - - /** - * Enables/Disables the use of the handle monitor. - * - * @param enable flag - */ - public void setEnableHandleMonitor(boolean enable) { - if (enable && monitor == null) { - monitor = new HandleMonitor(); - } - if (!enable && monitor != null) { - monitor = null; - } - } - - /** - * Returns true if use of the handle monitor is currently - * enabled, otherwise returns false. 
- * - * @see #setEnableHandleMonitor(boolean) - */ - public String getEnableHandleMonitor() { - return monitor == null ? "false" : "true"; - } - - private String osPath(String genericPath) { - if (File.separator.equals(SEPARATOR)) { - return genericPath; - } - return genericPath.replace(SEPARATOR_CHAR, File.separatorChar); - } - - //-------------------------------------------< java.lang.Object overrides > - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj instanceof LocalFileSystem) { - LocalFileSystem other = (LocalFileSystem) obj; - if (root == null) { - return other.root == null; - } else { - return root.equals(other.root); - } - } - return false; - } - - /** - * Returns zero to satisfy the Object equals/hashCode contract. - * This class is mutable and not meant to be used as a hash key. - * - * @return always zero - * @see Object#hashCode() - */ - public int hashCode() { - return 0; - } - - //-----------------------------------------------------------< FileSystem > - /** - * {@inheritDoc} - */ - public void init() throws FileSystemException { - if (root == null) { - String msg = "root directory not set"; - log.debug(msg); - throw new FileSystemException(msg); - } - - if (root.exists()) { - if (!root.isDirectory()) { - String msg = "path does not denote a folder"; - log.debug(msg); - throw new FileSystemException(msg); - } - } else { - if (!root.mkdirs()) { - String msg = "failed to create root"; - log.debug(msg); - throw new FileSystemException(msg); - } - } - log.info("LocalFileSystem initialized at path " + root.getPath()); - if (monitor != null) { - log.info("LocalFileSystem using handle monitor"); - } - } - - /** - * {@inheritDoc} - */ - public void close() throws FileSystemException { - root = null; - } - - /** - * {@inheritDoc} - */ - public void createFolder(String folderPath) throws FileSystemException { - File f = new File(root, osPath(folderPath)); - if (f.exists()) { - String msg = f.getPath() + " already exists"; 
- log.debug(msg); - throw new FileSystemException(msg); - } - if (!f.mkdirs()) { - String msg = "failed to create folder " + f.getPath(); - log.debug(msg); - throw new FileSystemException(msg); - } - } - - /** - * {@inheritDoc} - */ - public void deleteFile(String filePath) throws FileSystemException { - File f = new File(root, osPath(filePath)); - if (!f.isFile()) { - String msg = f.getPath() + " does not denote an existing file"; - throw new FileSystemException(msg); - } - try { - FileUtil.delete(f); - } catch (IOException ioe) { - String msg = "failed to delete " + f.getPath(); - if (monitor != null && monitor.isOpen(f)) { - log.error("Unable to delete. There are still open streams."); - monitor.dump(f); - } - - throw new FileSystemException(msg, ioe); - } - } - - /** - * {@inheritDoc} - */ - public void deleteFolder(String folderPath) throws FileSystemException { - File f = new File(root, osPath(folderPath)); - if (!f.isDirectory()) { - String msg = f.getPath() + " does not denote an existing folder"; - log.debug(msg); - throw new FileSystemException(msg); - } - try { - FileUtil.delete(f); - } catch (IOException ioe) { - String msg = "failed to delete " + f.getPath(); - log.debug(msg); - throw new FileSystemException(msg, ioe); - } - } - - /** - * {@inheritDoc} - */ - public boolean exists(String path) throws FileSystemException { - File f = new File(root, osPath(path)); - return f.exists(); - } - - /** - * {@inheritDoc} - */ - public InputStream getInputStream(String filePath) - throws FileSystemException { - File f = new File(root, osPath(filePath)); - try { - if (monitor == null) { - return new LazyFileInputStream(f); - } else { - return monitor.open(f); - } - } catch (FileNotFoundException fnfe) { - String msg = f.getPath() + " does not denote an existing file"; - log.debug(msg); - throw new FileSystemException(msg, fnfe); - } - } - - /** - * {@inheritDoc} - */ - public OutputStream getOutputStream(String filePath) - throws FileSystemException { - File f = 
new File(root, osPath(filePath)); - try { - return new FileOutputStream(f); - } catch (FileNotFoundException fnfe) { - String msg = "failed to get output stream for " + f.getPath(); - log.debug(msg); - throw new FileSystemException(msg, fnfe); - } - } - - /** - * {@inheritDoc} - */ - public boolean hasChildren(String path) throws FileSystemException { - File f = new File(root, osPath(path)); - if (!f.exists()) { - String msg = f.getPath() + " does not exist"; - log.debug(msg); - throw new FileSystemException(msg); - } - if (f.isFile()) { - return false; - } - return (f.list().length > 0); - } - - /** - * {@inheritDoc} - */ - public boolean isFile(String path) throws FileSystemException { - File f = new File(root, osPath(path)); - return f.isFile(); - } - - /** - * {@inheritDoc} - */ - public boolean isFolder(String path) throws FileSystemException { - File f = new File(root, osPath(path)); - return f.isDirectory(); - } - - /** - * {@inheritDoc} - */ - public long lastModified(String path) throws FileSystemException { - File f = new File(root, osPath(path)); - return f.lastModified(); - } - - /** - * {@inheritDoc} - */ - public long length(String filePath) throws FileSystemException { - File f = new File(root, osPath(filePath)); - if (!f.exists()) { - return -1; - } - return f.length(); - } - - /** - * {@inheritDoc} - */ - public String[] list(String folderPath) throws FileSystemException { - File f = new File(root, osPath(folderPath)); - String[] entries = f.list(); - if (entries == null) { - String msg = folderPath + " does not denote a folder"; - log.debug(msg); - throw new FileSystemException(msg); - } - return entries; - } - - /** - * {@inheritDoc} - */ - public String[] listFiles(String folderPath) throws FileSystemException { - File folder = new File(root, osPath(folderPath)); - File[] files = folder.listFiles(new FileFilter() { - public boolean accept(File f) { - return f.isFile(); - } - }); - if (files == null) { - String msg = folderPath + " does not 
denote a folder"; - log.debug(msg); - throw new FileSystemException(msg); - } - String[] entries = new String[files.length]; - for (int i = 0; i < files.length; i++) { - entries[i] = files[i].getName(); - } - return entries; - } - - /** - * {@inheritDoc} - */ - public String[] listFolders(String folderPath) throws FileSystemException { - File file = new File(root, osPath(folderPath)); - File[] folders = file.listFiles(new FileFilter() { - public boolean accept(File f) { - return f.isDirectory(); - } - }); - if (folders == null) { - String msg = folderPath + " does not denote a folder"; - log.debug(msg); - throw new FileSystemException(msg); - } - String[] entries = new String[folders.length]; - for (int i = 0; i < folders.length; i++) { - entries[i] = folders[i].getName(); - } - return entries; - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java (working copy) @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.fs; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * Extends the regular java.io.OutputStream with a random - * access facility. Multiple write() operations can be - * positioned off sequence with the {@link #seek} method. - * - * @deprecated this class should no longer be used - */ -public abstract class RandomAccessOutputStream extends OutputStream { - - /** - * Sets the current position in the resource where the next write - * will occur. - * - * @param position the new position in the resource. - * @throws IOException if an error occurs while seeking to the position. - */ - public abstract void seek(long position) throws IOException; -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/gc/GarbageCollector.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/gc/GarbageCollector.java (revision 0) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/gc/GarbageCollector.java (working copy) @@ -0,0 +1,511 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.gc; + +import org.apache.jackrabbit.api.management.DataStoreGarbageCollector; +import org.apache.jackrabbit.api.management.MarkEventListener; +import org.apache.jackrabbit.core.RepositoryContext; +import org.apache.jackrabbit.core.SessionImpl; +import org.apache.jackrabbit.core.data.DataStore; +import org.apache.jackrabbit.core.id.NodeId; +import org.apache.jackrabbit.core.id.PropertyId; +import org.apache.jackrabbit.core.observation.SynchronousEventListener; +import org.apache.jackrabbit.core.persistence.IterablePersistenceManager; +import org.apache.jackrabbit.core.persistence.util.NodeInfo; +import org.apache.jackrabbit.core.state.ItemStateException; +import org.apache.jackrabbit.core.state.NoSuchItemStateException; +import org.apache.jackrabbit.core.state.NodeState; +import org.apache.jackrabbit.core.state.PropertyState; +import org.apache.jackrabbit.core.value.InternalValue; +import org.apache.jackrabbit.spi.Name; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import javax.jcr.InvalidItemStateException; +import javax.jcr.Item; +import javax.jcr.Node; +import javax.jcr.NodeIterator; +import javax.jcr.PathNotFoundException; +import javax.jcr.Property; +import javax.jcr.PropertyIterator; +import javax.jcr.PropertyType; +import javax.jcr.RepositoryException; +import javax.jcr.Session; +import javax.jcr.UnsupportedRepositoryOperationException; +import javax.jcr.Workspace; +import javax.jcr.observation.Event; +import javax.jcr.observation.EventIterator; +import javax.jcr.observation.ObservationManager; + +/** + * Garbage collector for DataStore. This implementation iterates through all + * nodes and reads the binary properties. 
To detect nodes that are moved while + * the scan runs, event listeners are started. Like the well known garbage + * collection in Java, the items that are still in use are marked. Currently + * this is achieved by updating the modified date of the entries. Newly added + * entries are detected because the modified date is changed when they are + * added. + *

+ * Example code to run the data store garbage collection: + *

+ * JackrabbitRepositoryFactory jf = (JackrabbitRepositoryFactory) factory;
+ * RepositoryManager m = jf.getRepositoryManager((JackrabbitRepository) repository);
+ * GarbageCollector gc = m.createDataStoreGarbageCollector();
+ * try {
+ *     gc.mark();
+ *     gc.sweep();
+ * } finally {
+ *     gc.close();
+ * }
+ * 
+ */
+public class GarbageCollector implements DataStoreGarbageCollector {
+
+    /** logger instance */
+    static final Logger LOG = LoggerFactory.getLogger(GarbageCollector.class);
+
+    /**
+     * The number of nodes to fetch at once from the persistence manager. Defaults to 8kb
+     */
+    private static final int NODESATONCE = Integer.getInteger("org.apache.jackrabbit.garbagecollector.nodesatonce", 1024 * 8);
+
+    /**
+     * Set this System Property to true to speed up the node traversing in a binary focused repository.
+     * See JCR-3708
+     */
+    private static final boolean NODE_ID_SCAN = Boolean.getBoolean("org.apache.jackrabbit.garbagecollector.node_id.scan");
+
+    private MarkEventListener callback;
+
+    private long sleepBetweenNodes;
+
+    protected int testDelay;
+
+    private final DataStore store;
+
+    private long startScanTimestamp;
+
+    private final ArrayList<Listener> listeners = new ArrayList<Listener>();
+
+    private final IterablePersistenceManager[] pmList;
+
+    private final SessionImpl[] sessionList;
+
+    private final AtomicBoolean closed = new AtomicBoolean();
+
+    private final RepositoryContext context;
+
+    private boolean persistenceManagerScan;
+
+    private volatile RepositoryException observationException;
+
+    /**
+     * Create a new garbage collector.
+     * This method is usually not called by the application, it is called
+     * by SessionImpl.createDataStoreGarbageCollector().
+ * + * @param context repository context + * @param dataStore the data store to be garbage-collected + * @param list the persistence managers + * @param sessionList the sessions to access the workspaces + */ + + public GarbageCollector(RepositoryContext context, + DataStore dataStore, IterablePersistenceManager[] list, + SessionImpl[] sessionList) { + this.context = context; + this.store = dataStore; + this.pmList = list; + this.persistenceManagerScan = list != null; + this.sessionList = sessionList; + } + + public void setSleepBetweenNodes(long millis) { + this.sleepBetweenNodes = millis; + } + + public long getSleepBetweenNodes() { + return sleepBetweenNodes; + } + + /** + * When testing the garbage collection, a delay is used instead of simulating concurrent access. + * + * @param testDelay the delay in milliseconds + */ + public void setTestDelay(int testDelay) { + this.testDelay = testDelay; + } + + public void setMarkEventListener(MarkEventListener callback) { + this.callback = callback; + } + + public void mark() throws RepositoryException { + if (store == null) { + throw new RepositoryException("No DataStore configured."); + } + long now = System.currentTimeMillis(); + if (startScanTimestamp == 0) { + startScanTimestamp = now; + store.updateModifiedDateOnAccess(startScanTimestamp); + } + + if (pmList == null || !persistenceManagerScan) { + for (SessionImpl s : sessionList) { + scanNodes(s); + } + } else { + try { + if (!NODE_ID_SCAN) { + scanPersistenceManagersByNodeInfos(); + } else { + scanPersistenceManagersByNodeIds(); + } + } catch (ItemStateException e) { + throw new RepositoryException(e); + } + } + } + + private void scanNodes(SessionImpl session) throws RepositoryException { + + // add a listener to get 'moved' nodes + Session clonedSession = session.createSession(session.getWorkspace().getName()); + listeners.add(new Listener(this, clonedSession)); + + // adding a link to a BLOB updates the modified date + // reading usually doesn't, but when 
scanning, it does
+        recurse(session.getRootNode(), sleepBetweenNodes);
+    }
+
+    public void setPersistenceManagerScan(boolean allow) {
+        persistenceManagerScan = allow;
+    }
+
+    public boolean isPersistenceManagerScan() {
+        return persistenceManagerScan;
+    }
+
+    private void scanPersistenceManagersByNodeInfos() throws RepositoryException, ItemStateException {
+        int pmCount = 0;
+        for (IterablePersistenceManager pm : pmList) {
+            pmCount++;
+            int count = 0;
+            Map<NodeId, NodeInfo> batch = pm.getAllNodeInfos(null, NODESATONCE);
+            while (!batch.isEmpty()) {
+                NodeId lastId = null;
+                for (NodeInfo info : batch.values()) {
+                    count++;
+                    if (count % 1000 == 0) {
+                        LOG.debug(pm.toString() + " ("+pmCount + "/" + pmList.length + "): analyzed " + count + " nodes...");
+                    }
+                    lastId = info.getId();
+                    if (callback != null) {
+                        callback.beforeScanning(null);
+                    }
+                    if (info.hasBlobsInDataStore()) {
+                        try {
+                            NodeState state = pm.load(info.getId());
+                            Set<Name> propertyNames = state.getPropertyNames();
+                            for (Name name : propertyNames) {
+                                PropertyId pid = new PropertyId(info.getId(), name);
+                                PropertyState ps = pm.load(pid);
+                                if (ps.getType() == PropertyType.BINARY) {
+                                    for (InternalValue v : ps.getValues()) {
+                                        // getLength will update the last modified date
+                                        // if the persistence manager scan is running
+                                        v.getLength();
+                                    }
+                                }
+                            }
+                        } catch (NoSuchItemStateException ignored) {
+                            // the node may have been deleted in the meantime
+                        }
+                    }
+                }
+                batch = pm.getAllNodeInfos(lastId, NODESATONCE);
+            }
+        }
+        NodeInfo.clearPool();
+    }
+
+    private void scanPersistenceManagersByNodeIds() throws RepositoryException, ItemStateException {
+        int pmCount = 0;
+        for (IterablePersistenceManager pm : pmList) {
+            pmCount++;
+            List<NodeId> allNodeIds = pm.getAllNodeIds(null, 0);
+            int overAllCount = allNodeIds.size();
+            int count = 0;
+            for (NodeId id : allNodeIds) {
+                count++;
+                if (count % 1000 == 0) {
+                    LOG.debug(pm.toString() + " ("+pmCount + "/" + pmList.length + "): analyzed " + count + " nodes [" + overAllCount + "]...");
+
}
+                if (callback != null) {
+                    callback.beforeScanning(null);
+                }
+                try {
+                    NodeState state = pm.load(id);
+                    Set<Name> propertyNames = state.getPropertyNames();
+                    for (Name name : propertyNames) {
+                        PropertyId pid = new PropertyId(id, name);
+                        PropertyState ps = pm.load(pid);
+                        if (ps.getType() == PropertyType.BINARY) {
+                            for (InternalValue v : ps.getValues()) {
+                                // getLength will update the last modified date
+                                // if the persistence manager scan is running
+                                v.getLength();
+                            }
+                        }
+                    }
+                } catch (NoSuchItemStateException e) {
+                    // the node may have been deleted or moved in the meantime
+                    // ignore it
+                }
+            }
+        }
+    }
+
+    /**
+     * Reset modifiedDateOnAccess to 0 and stop the observation
+     * listener if any are installed.
+     */
+    public void stopScan() throws RepositoryException {
+        // reset updateModifiedDateOnAccess to 0L
+        store.updateModifiedDateOnAccess(0L);
+
+        if (listeners.size() > 0) {
+            for (Listener listener : listeners) {
+                listener.stop();
+            }
+            listeners.clear();
+        }
+        checkObservationException();
+        context.setGcRunning(false);
+    }
+
+    public int sweep() throws RepositoryException {
+        if (startScanTimestamp == 0) {
+            throw new RepositoryException("scan must be called first");
+        }
+        stopScan();
+        return store.deleteAllOlderThan(startScanTimestamp);
+    }
+
+    /**
+     * Get the data store if one is used.
+ * + * @return the data store, or null + */ + public DataStore getDataStore() { + return store; + } + + void recurse(final Node n, long sleep) throws RepositoryException { + if (sleep > 0) { + try { + Thread.sleep(sleep); + } catch (InterruptedException e) { + // ignore + } + } + if (callback != null) { + callback.beforeScanning(n); + } + try { + for (PropertyIterator it = n.getProperties(); it.hasNext();) { + Property p = it.nextProperty(); + try { + if (p.getType() == PropertyType.BINARY) { + if (n.hasProperty("jcr:uuid")) { + rememberNode(n.getProperty("jcr:uuid").getString()); + } else { + rememberNode(n.getPath()); + } + if (p.isMultiple()) { + checkLengths(p.getLengths()); + } else { + checkLengths(p.getLength()); + } + } + } catch (InvalidItemStateException e) { + LOG.debug("Property removed concurrently - ignoring", e); + } + } + } catch (InvalidItemStateException e) { + LOG.debug("Node removed concurrently - ignoring", e); + } + try { + for (NodeIterator it = n.getNodes(); it.hasNext();) { + recurse(it.nextNode(), sleep); + } + } catch (InvalidItemStateException e) { + LOG.debug("Node removed concurrently - ignoring", e); + } + checkObservationException(); + } + + private void rememberNode(String path) { + // Do nothing at the moment + // TODO It may be possible to delete some items early + /* + * To delete files early in the garbage collection scan, we could do + * this: + * + * A) If garbage collection was run before, see if there a file with the + * list of UUIDs ('uuids.txt'). + * + * B) If yes, and if the checksum is ok, read all those nodes first (if + * not so many). This updates the modified date of all old files that + * are still in use. Afterwards, delete all files with an older modified + * date than the last scan! Newer files, and files that are read have a + * newer modification date. + * + * C) Delete the 'uuids.txt' file (in any case). + * + * D) Iterate (recurse) through all nodes and properties like now. 
If a
+         * node has a binary property, store the UUID of the node in the file
+         * ('uuids.txt'). Also store the time when the scan started.
+         *
+         * E) Checksum and close the file.
+         *
+         * F) Like now, delete files with an older modification date than this
+         * scan.
+         *
+         * We can't use node path for this, UUIDs are required as nodes could be
+         * moved around.
+         *
+         * This mechanism requires that all data stores update the last modified
+         * date when calling addRecord and that record already exists.
+         *
+         */
+    }
+
+    private static void checkLengths(long... lengths) throws RepositoryException {
+        for (long length : lengths) {
+            if (length == -1) {
+                throw new RepositoryException("mark failed to access a property");
+            }
+        }
+    }
+
+    public void close() {
+        if (!closed.getAndSet(true)) {
+            try {
+                stopScan();
+            } catch (RepositoryException e) {
+                LOG.warn("An error occured when stopping the event listener", e);
+            }
+            for (Session s : sessionList) {
+                s.logout();
+            }
+        }
+    }
+
+    private void checkObservationException() throws RepositoryException {
+        RepositoryException e = observationException;
+        if (e != null) {
+            observationException = null;
+            String message = "Exception while processing concurrent events";
+            LOG.warn(message, e);
+            throw new RepositoryException(message, e);
+        }
+    }
+
+    void onObservationException(Exception e) {
+        if (e instanceof RepositoryException) {
+            observationException = (RepositoryException) e;
+        } else {
+            observationException = new RepositoryException(e);
+        }
+    }
+
+    /**
+     * Auto-close in case the application didn't call it explicitly.
+     */
+    protected void finalize() throws Throwable {
+        close();
+        super.finalize();
+    }
+
+    /**
+     * Event listener to detect moved nodes.
+     * A SynchronousEventListener is used to make sure this method is called before the main iteration ends.
+ */ + class Listener implements SynchronousEventListener { + + private final GarbageCollector gc; + private final Session session; + private final ObservationManager manager; + + Listener(GarbageCollector gc, Session session) + throws UnsupportedRepositoryOperationException, + RepositoryException { + this.gc = gc; + this.session = session; + Workspace ws = session.getWorkspace(); + manager = ws.getObservationManager(); + manager.addEventListener(this, Event.NODE_MOVED, "/", true, null, + null, false); + } + + void stop() throws RepositoryException { + manager.removeEventListener(this); + session.logout(); + } + + public void onEvent(EventIterator events) { + if (testDelay > 0) { + try { + Thread.sleep(testDelay); + } catch (InterruptedException e) { + // ignore + } + } + while (events.hasNext()) { + Event event = events.nextEvent(); + try { + String path = event.getPath(); + try { + Item item = session.getItem(path); + if (item.isNode()) { + Node n = (Node) item; + recurse(n, testDelay); + } + } catch (PathNotFoundException e) { + // ignore + } + } catch (Exception e) { + gc.onObservationException(e); + try { + stop(); + } catch (RepositoryException e2) { + LOG.warn("Exception removing the observation listener - ignored", e2); + } + } + } + } + } + +} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/InternalXAResource.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/InternalXAResource.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/InternalXAResource.java (working copy) @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core; - -/** - * Interface implemented by resources that provide XA functionality. - */ -public interface InternalXAResource { - - /** - * Associate this resource with a transaction. All further operations on - * the object should be interpreted as part of this transaction and changes - * recorded in some attribute of the transaction context. - * @param tx transaction context, if null disassociate - */ - void associate(TransactionContext tx); - - /** - * Invoked before one of the {@link #prepare}, {@link #commit} or - * {@link #rollback} method is called. - * @param tx transaction context - */ - void beforeOperation(TransactionContext tx); - - /** - * Prepare transaction. The transaction is identified by a transaction - * context. - * @param tx transaction context - * @throws TransactionException if an error occurs - */ - void prepare(TransactionContext tx) throws TransactionException; - - /** - * Commit transaction. The transaction is identified by a transaction - * context. If the method throws, other resources get their changes - * rolled back. - * @param tx transaction context - * @throws TransactionException if an error occurs - */ - void commit(TransactionContext tx) throws TransactionException; - - /** - * Rollback transaction. The transaction is identified by a transaction - * context. - * @param tx transaction context. 
- */ - void rollback(TransactionContext tx) throws TransactionException; - - /** - * Invoked after one of the {@link #prepare}, {@link #commit} or - * {@link #rollback} method has been called. - * @param tx transaction context - */ - void afterOperation(TransactionContext tx); - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XAEnvironment.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XAEnvironment.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XAEnvironment.java (working copy) @@ -16,11 +16,11 @@ */ package org.apache.jackrabbit.core.lock; -import org.apache.jackrabbit.core.TransactionException; import org.apache.jackrabbit.core.NodeImpl; import org.apache.jackrabbit.core.SessionImpl; import org.apache.jackrabbit.core.WorkspaceImpl; import org.apache.jackrabbit.core.id.NodeId; +import org.apache.jackrabbit.data.core.TransactionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XALockManager.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XALockManager.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/lock/XALockManager.java (working copy) @@ -16,11 +16,11 @@ */ package org.apache.jackrabbit.core.lock; -import org.apache.jackrabbit.core.InternalXAResource; import org.apache.jackrabbit.core.NodeImpl; import org.apache.jackrabbit.core.SessionImpl; -import org.apache.jackrabbit.core.TransactionContext; -import org.apache.jackrabbit.core.TransactionException; +import org.apache.jackrabbit.data.core.InternalXAResource; +import org.apache.jackrabbit.data.core.TransactionContext; +import org.apache.jackrabbit.data.core.TransactionException; import org.apache.jackrabbit.spi.Path; import 
javax.jcr.RepositoryException; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/RepositoryImpl.java (working copy) @@ -72,10 +72,10 @@ import org.apache.jackrabbit.core.config.WorkspaceConfig; import org.apache.jackrabbit.core.data.DataStore; import org.apache.jackrabbit.core.data.DataStoreException; -import org.apache.jackrabbit.core.data.GarbageCollector; import org.apache.jackrabbit.core.fs.FileSystem; import org.apache.jackrabbit.core.fs.FileSystemException; import org.apache.jackrabbit.core.fs.FileSystemResource; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.apache.jackrabbit.core.id.NodeId; import org.apache.jackrabbit.core.id.NodeIdFactory; import org.apache.jackrabbit.core.lock.LockManager; @@ -106,6 +106,7 @@ import org.apache.jackrabbit.core.version.InternalVersionManager; import org.apache.jackrabbit.core.version.InternalVersionManagerImpl; import org.apache.jackrabbit.core.xml.ClonedInputSource; +import org.apache.jackrabbit.data.core.TransactionException; import org.apache.jackrabbit.spi.commons.name.NameConstants; import org.apache.jackrabbit.spi.commons.namespace.NamespaceResolver; import org.apache.jackrabbit.spi.commons.namespace.RegistryNamespaceResolver; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SessionImpl.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SessionImpl.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/SessionImpl.java (working copy) @@ -70,7 +70,7 @@ import org.apache.jackrabbit.api.security.user.UserManager; import org.apache.jackrabbit.commons.AbstractSession; import 
org.apache.jackrabbit.core.config.WorkspaceConfig; -import org.apache.jackrabbit.core.data.GarbageCollector; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.apache.jackrabbit.core.id.NodeId; import org.apache.jackrabbit.core.nodetype.NodeTypeManagerImpl; import org.apache.jackrabbit.core.observation.ObservationManagerImpl; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/DefaultISMLocking.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/DefaultISMLocking.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/DefaultISMLocking.java (working copy) @@ -16,8 +16,8 @@ */ package org.apache.jackrabbit.core.state; -import static org.apache.jackrabbit.core.TransactionContext.getCurrentThreadId; -import static org.apache.jackrabbit.core.TransactionContext.isSameThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.getCurrentThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.isSameThreadId; import org.apache.jackrabbit.core.id.ItemId; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/FineGrainedISMLocking.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/FineGrainedISMLocking.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/FineGrainedISMLocking.java (working copy) @@ -16,8 +16,8 @@ */ package org.apache.jackrabbit.core.state; -import static org.apache.jackrabbit.core.TransactionContext.getCurrentThreadId; -import static org.apache.jackrabbit.core.TransactionContext.isSameThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.getCurrentThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.isSameThreadId; import java.util.Collections; import java.util.HashMap; Index: 
jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/XAItemStateManager.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/XAItemStateManager.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/state/XAItemStateManager.java (working copy) @@ -26,9 +26,6 @@ import org.apache.commons.collections.Predicate; import org.apache.commons.collections.iterators.FilterIterator; -import org.apache.jackrabbit.core.InternalXAResource; -import org.apache.jackrabbit.core.TransactionContext; -import org.apache.jackrabbit.core.TransactionException; import org.apache.jackrabbit.core.id.ItemId; import org.apache.jackrabbit.core.id.NodeId; import org.apache.jackrabbit.core.id.PropertyId; @@ -35,6 +32,9 @@ import org.apache.jackrabbit.core.observation.EventStateCollectionFactory; import org.apache.jackrabbit.core.value.InternalValue; import org.apache.jackrabbit.core.virtual.VirtualItemStateProvider; +import org.apache.jackrabbit.data.core.InternalXAResource; +import org.apache.jackrabbit.data.core.TransactionContext; +import org.apache.jackrabbit.data.core.TransactionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionContext.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionContext.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionContext.java (working copy) @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; - -import javax.transaction.xa.XAException; -import javax.transaction.xa.Xid; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Represents the transaction on behalf of the component that wants to - * explicitly demarcate transaction boundaries. After having been prepared, - * schedules a task that rolls back the transaction if some time passes without - * any further action. This will guarantee that global objects locked by one - * of the resources' {@link InternalXAResource#prepare} method, are eventually - * unlocked. - */ -public class TransactionContext { - - /** - * Logger instance. - */ - private static final Logger log = LoggerFactory.getLogger(TransactionContext.class); - - private static final int STATUS_PREPARING = 1; - private static final int STATUS_PREPARED = 2; - private static final int STATUS_COMMITTING = 3; - private static final int STATUS_COMMITTED = 4; - private static final int STATUS_ROLLING_BACK = 5; - private static final int STATUS_ROLLED_BACK = 6; - - /** - * The per thread associated Xid - */ - private static final ThreadLocal CURRENT_XID = new ThreadLocal(); - - /** - * Transactional resources. 
- */ - private final InternalXAResource[] resources; - - /** - * The Xid - */ - private final Xid xid; - - /** - * Transaction attributes. - */ - private final Map attributes = new HashMap(); - - /** - * Status. - */ - private int status; - - /** - * Flag indicating whether the association is currently suspended. - */ - private boolean suspended; - - /** - * Create a new instance of this class. - * - * @param xid associated xid - * @param resources transactional resources - */ - public TransactionContext(Xid xid, InternalXAResource[] resources) { - this.xid = xid; - this.resources = resources; - } - - /** - * Set an attribute on this transaction. If the value specified is - * null, it is semantically equivalent to - * {@link #removeAttribute}. - * - * @param name attribute name - * @param value attribute value - */ - public void setAttribute(String name, Object value) { - if (value == null) { - removeAttribute(name); - } - attributes.put(name, value); - } - - /** - * Return an attribute value on this transaction. - * - * @param name attribute name - * @return attribute value, null if no attribute with that - * name exists - */ - public Object getAttribute(String name) { - return attributes.get(name); - } - - /** - * Remove an attribute on this transaction. - * - * @param name attribute name - */ - public void removeAttribute(String name) { - attributes.remove(name); - } - - /** - * Prepare the transaction identified by this context. Prepares changes on - * all resources. If some resource reports an error on prepare, - * automatically rollback changes on all other resources. Throw exception - * at the end if errors were found. 
- * - * @throws XAException if an error occurs - */ - public synchronized void prepare() throws XAException { - bindCurrentXid(); - status = STATUS_PREPARING; - beforeOperation(); - - TransactionException txe = null; - for (int i = 0; i < resources.length; i++) { - try { - resources[i].prepare(this); - } catch (TransactionException e) { - txe = e; - break; - } catch (Exception e) { - txe = new TransactionException("Error while preparing resource " + resources, e); - break; - } - } - - afterOperation(); - status = STATUS_PREPARED; - - if (txe != null) { - // force immediate rollback on error. - try { - rollback(); - } catch (XAException e) { - /* ignore */ - } - XAException e = new XAException(XAException.XA_RBOTHER); - e.initCause(txe); - throw e; - } - } - - /** - * Commit the transaction identified by this context. Commits changes on - * all resources. If some resource reports an error on commit, - * automatically rollback changes on all other resources. Throw - * exception at the end if some commit failed. 
- * - * @throws XAException if an error occurs - */ - public synchronized void commit() throws XAException { - if (status == STATUS_ROLLED_BACK) { - throw new XAException(XAException.XA_HEURRB); - } - - boolean heuristicCommit = false; - bindCurrentXid(); - status = STATUS_COMMITTING; - beforeOperation(); - - TransactionException txe = null; - for (int i = 0; i < resources.length; i++) { - InternalXAResource resource = resources[i]; - if (txe != null) { - try { - resource.rollback(this); - } catch (Exception e) { - log.warn("Unable to rollback changes on " + resource, e); - } - } else { - try { - resource.commit(this); - heuristicCommit = true; - } catch (TransactionException e) { - txe = e; - } catch (Exception e) { - txe = new TransactionException("Error while committing resource " + resource, e); - } - } - } - afterOperation(); - status = STATUS_COMMITTED; - - cleanCurrentXid(); - - if (txe != null) { - XAException e = null; - if (heuristicCommit) { - e = new XAException(XAException.XA_HEURMIX); - } else { - e = new XAException(XAException.XA_HEURRB); - } - e.initCause(txe); - throw e; - } - } - - /** - * Rollback the transaction identified by this context. Rolls back changes - * on all resources. Throws exception at the end if errors were found. 
- * @throws XAException if an error occurs - */ - public synchronized void rollback() throws XAException { - if (status == STATUS_ROLLED_BACK) { - throw new XAException(XAException.XA_RBOTHER); - } - bindCurrentXid(); - status = STATUS_ROLLING_BACK; - beforeOperation(); - - int errors = 0; - for (int i = 0; i < resources.length; i++) { - InternalXAResource resource = resources[i]; - try { - resource.rollback(this); - } catch (Exception e) { - log.warn("Unable to rollback changes on " + resource, e); - errors++; - } - } - afterOperation(); - status = STATUS_ROLLED_BACK; - - cleanCurrentXid(); - - if (errors != 0) { - throw new XAException(XAException.XA_RBOTHER); - } - } - - /** - * Invoke all of the registered resources' {@link InternalXAResource#beforeOperation} - * methods. - */ - private void beforeOperation() { - for (int i = 0; i < resources.length; i++) { - resources[i].beforeOperation(this); - } - } - - /** - * Invoke all of the registered resources' {@link InternalXAResource#afterOperation} - * methods. - */ - private void afterOperation() { - for (int i = 0; i < resources.length; i++) { - resources[i].afterOperation(this); - } - } - - /** - * Return a flag indicating whether the association is suspended. - * - * @return true if the association is suspended; - * false otherwise - */ - public boolean isSuspended() { - return suspended; - } - - /** - * Set a flag indicating whether the association is suspended. - * - * @param suspended flag whether that the association is suspended. - */ - public void setSuspended(boolean suspended) { - this.suspended = suspended; - } - - /** - * Helper Method to bind the {@link Xid} associated with this {@link TransactionContext} - * to the {@link #CURRENT_XID} ThreadLocal. - */ - private void bindCurrentXid() { - CURRENT_XID.set(xid); - } - - /** - * Helper Method to clean the {@link Xid} associated with this {@link TransactionContext} - * from the {@link #CURRENT_XID} ThreadLocal. 
- */ - private void cleanCurrentXid() { - CURRENT_XID.set(null); - } - - /** - * Returns the {@link Xid} bind to the {@link #CURRENT_XID} ThreadLocal - * @return current Xid or null - */ - private static Xid getCurrentXid() { - return CURRENT_XID.get(); - } - - /** - * Returns the current thread identifier. The identifier is either the - * current thread instance or the global transaction identifier wrapped - * in a {@link XidWrapper}, when running under a transaction. - * - * @return current thread identifier - */ - public static Object getCurrentThreadId() { - Xid xid = TransactionContext.getCurrentXid(); - if (xid != null) { - return new XidWrapper(xid.getGlobalTransactionId()); - } else { - return Thread.currentThread(); - } - } - - /** - * Compares the given thread identifiers for equality. - * - * @see #getCurrentThreadId() - */ - public static boolean isSameThreadId(Object a, Object b) { - if (a == b) { - return true; - } else if (a != null) { - return a.equals(b); - } else { - return false; - } - } - - /** - * Wrapper around a global transaction id (byte[]) - * that handles hashCode and equals in a proper way. 
- */ - private static class XidWrapper { - private byte[] gtid; - - public XidWrapper(byte[] gtid) { - this.gtid = gtid; - } - - @Override - public boolean equals(Object other) { - if (!(other instanceof XidWrapper)) { - return false; - } - return Arrays.equals((byte[]) gtid, ((XidWrapper)other).gtid); - } - - @Override - public int hashCode() { - return Arrays.hashCode(gtid); - } - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionException.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionException.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/TransactionException.java (working copy) @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core; - -/** - * TransactionException is thrown when some operation inside the transaction - * fails. - */ -public class TransactionException extends Exception { - - /** - * Creates an instance of this class. Takes a detail message as parameter. 
- * - * @param message message - */ - public TransactionException(String message) { - super(message); - } - - /** - * Creates an instance of this class. Takes a message and a root throwable - * as parameter. - * - * @param message message - * @param rootCause root throwable - */ - public TransactionException(String message, Throwable rootCause) { - super(message, rootCause); - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java (working copy) @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.util.db; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.util.Text; - -/** - * An operation which synchronously checks the DB schema in the {@link #run()} method. The - * {@link #addVariableReplacement(String, String)} method return the instance to enable method chaining. - */ -public class CheckSchemaOperation { - - public static final String SCHEMA_OBJECT_PREFIX_VARIABLE = "${schemaObjectPrefix}"; - - public static final String TABLE_SPACE_VARIABLE = "${tableSpace}"; - - private final ConnectionHelper conHelper; - - private final InputStream ddl; - - private final String table; - - private final Map varReplacement = new HashMap(); - - /** - * @param connectionhelper the connection helper - * @param ddlStream the stream of the DDL to use to create the schema if necessary (closed by the - * {@link #run()} method) - * @param tableName the name of the table to use for the schema-existence-check - */ - public CheckSchemaOperation(ConnectionHelper connectionhelper, InputStream ddlStream, String tableName) { - conHelper = connectionhelper; - ddl = ddlStream; - table = tableName; - } - - /** - * Adds a variable replacement mapping. - * - * @param var the variable - * @param replacement the replacement value - * @return this - */ - public CheckSchemaOperation addVariableReplacement(String var, String replacement) { - varReplacement.put(var, replacement); - return this; - } - - /** - * Checks if the required schema objects exist and creates them if they don't exist yet. 
- * - * @throws SQLException if an error occurs - * @throws IOException if an error occurs - */ - public void run() throws SQLException, IOException { - try { - if (!conHelper.tableExists(table)) { - BufferedReader reader = new BufferedReader(new InputStreamReader(ddl)); - String sql = reader.readLine(); - while (sql != null) { - // Skip comments and empty lines - if (!sql.startsWith("#") && sql.length() > 0) { - // replace prefix variable - sql = replace(sql); - // execute sql stmt - conHelper.exec(sql); - } - // read next sql stmt - sql = reader.readLine(); - } - } - } finally { - IOUtils.closeQuietly(ddl); - } - } - - /** - * Applies the variable replacement to the given string. - * - * @param sql the string in which to replace variables - * @return the new string - */ - private String replace(String sql) { - String result = sql; - for (Map.Entry entry : varReplacement.entrySet()) { - result = Text.replace(result, entry.getKey(), entry.getValue()).trim(); - } - return result; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java (working copy) @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.sql.Connection; -import java.sql.Driver; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.jcr.RepositoryException; -import javax.naming.Context; -import javax.naming.NamingException; -import javax.sql.DataSource; - -import org.apache.commons.dbcp.BasicDataSource; -import org.apache.commons.dbcp.DelegatingConnection; -import org.apache.commons.pool.impl.GenericObjectPool; -import org.apache.jackrabbit.core.config.DataSourceConfig; -import org.apache.jackrabbit.core.config.DataSourceConfig.DataSourceDefinition; -import org.apache.jackrabbit.util.Base64; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A factory for new database connections. - * Supported are regular JDBC drivers, as well as - * JNDI resources. - * - * FIXME: the registry currently is ClassLoader wide. I.e., if you start two repositories - * then you share the registered datasources... - */ -public final class ConnectionFactory { - - private static final Logger log = LoggerFactory.getLogger(ConnectionFactory.class); - - /** - * The lock to protect the fields of this class. - */ - private final Object lock = new Object(); - - /** - * The data sources without logical name. The keys in the map are based on driver-url-user combination. - */ - private final Map keyToDataSource = new HashMap(); - - /** - * The configured data sources with logical name. The keys in the map are the logical name. 
- */ - private final Map nameToDataSource = new HashMap(); - - /** - * The configured data source defs. The keys in the map are the logical name. - */ - private final Map nameToDataSourceDef = new HashMap(); - - /** - * The list of data sources created by this factory. - */ - private final List created = new ArrayList(); - - private boolean closed = false; - - /** - * Registers a number of data sources. - * - * @param dsc the {@link DataSourceConfig} which contains the configuration - */ - public void registerDataSources(DataSourceConfig dsc) throws RepositoryException { - synchronized (lock) { - sanityCheck(); - for (DataSourceDefinition def : dsc.getDefinitions()) { - Class driverClass = getDriverClass(def.getDriver()); - if (driverClass != null - && Context.class.isAssignableFrom(driverClass)) { - DataSource ds = getJndiDataSource((Class) driverClass, def.getUrl()); - nameToDataSource.put(def.getLogicalName(), ds); - nameToDataSourceDef.put(def.getLogicalName(), def); - } else { - BasicDataSource bds = - getDriverDataSource(driverClass, def.getUrl(), def.getUser(), def.getPassword()); - if (def.getMaxPoolSize() > 0) { - bds.setMaxActive(def.getMaxPoolSize()); - } - if (def.getValidationQuery() != null && !"".equals(def.getValidationQuery().trim())) { - bds.setValidationQuery(def.getValidationQuery()); - } - nameToDataSource.put(def.getLogicalName(), bds); - nameToDataSourceDef.put(def.getLogicalName(), def); - } - } - } - } - - /** - * Retrieves a configured data source by logical name. 
- * - * @param logicalName the name of the {@code DataSource} - * @return a {@code DataSource} - * @throws RepositoryException if there is no {@code DataSource} with the given name - */ - public DataSource getDataSource(String logicalName) throws RepositoryException { - synchronized (lock) { - sanityCheck(); - DataSource ds = nameToDataSource.get(logicalName); - if (ds == null) { - throw new RepositoryException("DataSource with logicalName " + logicalName - + " has not been configured"); - } - return ds; - } - } - - /** - * @param logicalName the name of the {@code DataSource} - * @return the configured database type - * @throws RepositoryException if there is no {@code DataSource} with the given name - */ - public String getDataBaseType(String logicalName) throws RepositoryException { - synchronized (lock) { - sanityCheck(); - DataSourceDefinition def = nameToDataSourceDef.get(logicalName); - if (def == null) { - throw new RepositoryException("DataSource with logicalName " + logicalName - + " has not been configured"); - } - return def.getDbType(); - } - } - - /** - * Retrieve a {@code DataSource} for the specified properties. - * This can be a JNDI Data Source as well. To do that, - * the driver class name must reference a {@code javax.naming.Context} class - * (for example {@code javax.naming.InitialContext}), and the URL must be the JNDI URL - * (for example {@code java:comp/env/jdbc/Test}). 
- * - * @param driver the JDBC driver or the Context class - * @param url the database URL - * @param user the user name - * @param password the password - * @return the {@code DataSource} - * @throws RepositoryException if the driver could not be loaded - * @throws SQLException if the connection could not be established - */ - public DataSource getDataSource(String driver, String url, String user, String password) - throws RepositoryException, SQLException { - final String key = driver + url + user; - synchronized(lock) { - sanityCheck(); - DataSource ds = keyToDataSource.get(key); - if (ds == null) { - ds = createDataSource( - driver, url, user, Base64.decodeIfEncoded(password)); - keyToDataSource.put(key, ds); - } - return ds; - } - } - - /** - * - */ - public void close() { - synchronized(lock) { - sanityCheck(); - for (BasicDataSource ds : created) { - try { - ds.close(); - } catch (SQLException e) { - log.error("failed to close " + ds, e); - } - } - keyToDataSource.clear(); - nameToDataSource.clear(); - nameToDataSourceDef.clear(); - created.clear(); - closed = true; - } - } - - /** - * Needed for pre-10R2 Oracle blob support....:( - * - * This method actually assumes that we are using commons DBCP 1.2.2. - * - * @param con the commons-DBCP {@code DelegatingConnection} to unwrap - * @return the unwrapped connection - */ - public static Connection unwrap(Connection con) throws SQLException { - if (con instanceof DelegatingConnection) { - return ((DelegatingConnection)con).getInnermostDelegate(); - } else { - throw new SQLException("failed to unwrap connection of class " + con.getClass().getName() + - ", expected it to be a " + DelegatingConnection.class.getName()); - } - } - - private void sanityCheck() { - if (closed) { - throw new IllegalStateException("this factory has already been closed"); - } - } - - /** - * Create a new pooling data source or finds an existing JNDI data source (depends on driver). 
- * - * @param driver - * @param url - * @param user - * @param password - * @return - * @throws RepositoryException - */ - private DataSource createDataSource(String driver, String url, String user, String password) - throws RepositoryException { - Class driverClass = getDriverClass(driver); - if (driverClass != null - && Context.class.isAssignableFrom(driverClass)) { - @SuppressWarnings("unchecked") - DataSource database = getJndiDataSource((Class) driverClass, url); - if (user == null && password == null) { - return database; - } else { - return new DataSourceWrapper(database, user, password); - } - } else { - return getDriverDataSource(driverClass, url, user, password); - } - } - - /** - * Loads and returns the given JDBC driver (or JNDI context) class. - * Returns null if a class name is not given. - * - * @param driver driver class name - * @return driver class, or null - * @throws RepositoryException if the class can not be loaded - */ - private Class getDriverClass(String driver) - throws RepositoryException { - try { - if (driver != null && driver.length() > 0) { - return Class.forName(driver); - } else { - return null; - } - } catch (ClassNotFoundException e) { - throw new RepositoryException( - "Could not load JDBC driver class " + driver, e); - } - } - - /** - * Returns the JDBC {@link DataSource} bound to the given name in - * the JNDI {@link Context} identified by the given class. 
- * - * @param contextClass class that is instantiated to get the JNDI context - * @param name name of the DataSource within the JNDI context - * @return the DataSource bound in JNDI - * @throws RepositoryException if the JNDI context can not be accessed, - * or if the named DataSource is not found - */ - private DataSource getJndiDataSource( - Class contextClass, String name) - throws RepositoryException { - try { - Object object = contextClass.newInstance().lookup(name); - if (object instanceof DataSource) { - return (DataSource) object; - } else { - throw new RepositoryException( - "Object " + object + " with JNDI name " - + name + " is not a JDBC DataSource"); - } - } catch (InstantiationException e) { - throw new RepositoryException( - "Invalid JNDI context: " + contextClass.getName(), e); - } catch (IllegalAccessException e) { - throw new RepositoryException( - "Invalid JNDI context: " + contextClass.getName(), e); - } catch (NamingException e) { - throw new RepositoryException( - "JNDI name not found: " + name, e); - } - } - - /** - * Creates and returns a pooling JDBC {@link DataSource} for accessing - * the database identified by the given driver class and JDBC - * connection URL. The driver class can be null if - * a specific driver has not been configured. 
- * - * @param driverClass the JDBC driver class, or null - * @param url the JDBC connection URL - * @return pooling DataSource for accessing the specified database - */ - private BasicDataSource getDriverDataSource( - Class driverClass, String url, String user, String password) { - BasicDataSource ds = new BasicDataSource(); - created.add(ds); - - if (driverClass != null) { - Driver instance = null; - try { - // Workaround for Apache Derby: - // The JDBC specification recommends the Class.forName - // method without the .newInstance() method call, - // but it is required after a Derby 'shutdown' - instance = (Driver) driverClass.newInstance(); - } catch (Throwable e) { - // Ignore exceptions as there's no requirement for - // a JDBC driver class to have a public default constructor - } - if (instance != null) { - if (instance.jdbcCompliant()) { - // JCR-3445 At the moment the PostgreSQL isn't compliant because it doesn't implement this method... - ds.setValidationQueryTimeout(3); - } - } - ds.setDriverClassName(driverClass.getName()); - } - - ds.setUrl(url); - ds.setUsername(user); - ds.setPassword(password); - ds.setDefaultAutoCommit(true); - ds.setTestOnBorrow(false); - ds.setTestWhileIdle(true); - ds.setTimeBetweenEvictionRunsMillis(600000); // 10 Minutes - ds.setMinEvictableIdleTimeMillis(60000); // 1 Minute - ds.setMaxActive(-1); // unlimited - ds.setMaxIdle(GenericObjectPool.DEFAULT_MAX_IDLE + 10); - ds.setValidationQuery(guessValidationQuery(url)); - ds.setAccessToUnderlyingConnectionAllowed(true); - ds.setPoolPreparedStatements(true); - ds.setMaxOpenPreparedStatements(-1); // unlimited - return ds; - } - - private String guessValidationQuery(String url) { - if (url.contains("derby")) { - return "values(1)"; - } else if (url.contains("mysql")) { - return "select 1"; - } else if (url.contains("sqlserver") || url.contains("jtds")) { - return "select 1"; - } else if (url.contains("oracle")) { - return "select 'validationQuery' from dual"; - } else if 
(url.contains("postgresql")) { - return "select 1"; - } else if (url.contains("h2")) { - return "select 1"; - } else if (url.contains("db2")) { - return "values(1)"; - } - log.warn("Failed to guess validation query for URL " + url); - return null; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java (working copy) @@ -1,598 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.core.util.db; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import javax.sql.DataSource; - -import org.apache.jackrabbit.core.TransactionContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class provides convenience methods to execute SQL statements. They can be either executed in isolation - * or within the context of a JDBC transaction; the so-called batch mode (use the {@link #startBatch()} - * and {@link #endBatch(boolean)} methods for this). - * - *

- * - * This class contains logic to retry execution of SQL statements. If this helper is not in batch mode - * and if a statement fails due to an {@code SQLException}, then it is retried. If the {@code block} argument - * of the constructor call was {@code false} then it is retried only once. Otherwise the statement is retried - * until either it succeeds or the thread is interrupted. This clearly assumes that the only cause of {@code - * SQLExceptions} is faulty {@code Connections} which are restored eventually.
Note: - * This retry logic only applies to the following methods: - *

    - *
  • {@link #exec(String, Object...)}
  • - *
  • {@link #update(String, Object[])}
  • - *
  • {@link #exec(String, Object[], boolean, int)}
  • - *
- * - *

- * - * This class is not thread-safe and if it is to be used by multiple threads then the clients must make sure - * that access to this class is properly synchronized. - * - *

- * - * Implementation note: The {@code Connection} that is retrieved from the {@code DataSource} - * in {@link #getConnection()} may be broken. This is so because if an internal {@code DataSource} is used, - * then this is a commons-dbcp {@code DataSource} with a testWhileIdle validation strategy (see - * the {@code ConnectionFactory} class). Furthermore, if it is a {@code DataSource} obtained through JNDI then we - * can make no assumptions about the validation strategy. This means that our retry logic must either assume that - * the SQL it tries to execute can do so without errors (i.e., the statement is valid), or it must implement its - * own validation strategy to apply. Currently, the former is in place. - */ -public class ConnectionHelper { - - static Logger log = LoggerFactory.getLogger(ConnectionHelper.class); - - private static final int RETRIES = 1; - - private static final int SLEEP_BETWEEN_RETRIES_MS = 100; - - final boolean blockOnConnectionLoss; - - private final boolean checkTablesWithUserName; - - protected final DataSource dataSource; - - private Map batchConnectionMap = Collections.synchronizedMap(new HashMap()); - - /** - * The default fetchSize is '0'. 
This means the fetchSize Hint will be ignored - */ - private int fetchSize = 0; - - /** - * @param dataSrc the {@link DataSource} on which this instance acts - * @param block whether the helper should transparently block on DB connection loss (otherwise it retries - * once and if that fails throws exception) - */ - public ConnectionHelper(DataSource dataSrc, boolean block) { - dataSource = dataSrc; - checkTablesWithUserName = false; - blockOnConnectionLoss = block; - } - - /** - * @param dataSrc the {@link DataSource} on which this instance acts - * @param checkWithUserName whether the username is to be used for the {@link #tableExists(String)} method - * @param block whether the helper should transparently block on DB connection loss (otherwise it throws exceptions) - */ - protected ConnectionHelper(DataSource dataSrc, boolean checkWithUserName, boolean block) { - dataSource = dataSrc; - checkTablesWithUserName = checkWithUserName; - blockOnConnectionLoss = block; - } - - /** - * @param dataSrc the {@link DataSource} on which this instance acts - * @param checkWithUserName whether the username is to be used for the {@link #tableExists(String)} method - * @param block whether the helper should transparently block on DB connection loss (otherwise it throws exceptions) - * @param fetchSize the fetchSize that will be used per default - */ - protected ConnectionHelper(DataSource dataSrc, boolean checkWithUserName, boolean block, int fetchSize) { - dataSource = dataSrc; - checkTablesWithUserName = checkWithUserName; - blockOnConnectionLoss = block; - this.fetchSize = fetchSize; - } - - /** - * A utility method that makes sure that identifier does only consist of characters that are - * allowed in names on the target database. Illegal characters will be escaped as necessary. 
- * - * This method is not affected by the - * - * @param identifier the identifier to convert to a db specific identifier - * @return the db-normalized form of the given identifier - * @throws SQLException if an error occurs - */ - public final String prepareDbIdentifier(String identifier) throws SQLException { - if (identifier == null) { - return null; - } - String legalChars = "ABCDEFGHIJKLMNOPQRSTUVWXZY0123456789_"; - legalChars += getExtraNameCharacters(); - String id = identifier.toUpperCase(); - StringBuilder escaped = new StringBuilder(); - for (int i = 0; i < id.length(); i++) { - char c = id.charAt(i); - if (legalChars.indexOf(c) == -1) { - replaceCharacter(escaped, c); - } else { - escaped.append(c); - } - } - return escaped.toString(); - } - - /** - * Called from {@link #prepareDbIdentifier(String)}. Default implementation replaces the illegal - * characters with their hexadecimal encoding. - * - * @param escaped the escaped db identifier - * @param c the character to replace - */ - protected void replaceCharacter(StringBuilder escaped, char c) { - escaped.append("_x"); - String hex = Integer.toHexString(c); - escaped.append("0000".toCharArray(), 0, 4 - hex.length()); - escaped.append(hex); - escaped.append("_"); - } - - /** - * Returns true if we are currently in a batch mode, false otherwise. - * - * @return true if the current thread or the active transaction is running in batch mode, false otherwise. - */ - protected boolean inBatchMode() { - return getTransactionAwareBatchConnection() != null; - } - - /** - * The default implementation returns the {@code extraNameCharacters} provided by the databases metadata. 
- * - * @return the additional characters for identifiers supported by the db - * @throws SQLException on error - */ - private String getExtraNameCharacters() throws SQLException { - Connection con = dataSource.getConnection(); - try { - DatabaseMetaData metaData = con.getMetaData(); - return metaData.getExtraNameCharacters(); - } finally { - DbUtility.close(con, null, null); - } - } - - /** - * Checks whether the given table exists in the database. - * - * @param tableName the name of the table - * @return whether the given table exists - * @throws SQLException on error - */ - public final boolean tableExists(String tableName) throws SQLException { - Connection con = dataSource.getConnection(); - ResultSet rs = null; - boolean schemaExists = false; - String name = tableName; - try { - DatabaseMetaData metaData = con.getMetaData(); - if (metaData.storesLowerCaseIdentifiers()) { - name = tableName.toLowerCase(); - } else if (metaData.storesUpperCaseIdentifiers()) { - name = tableName.toUpperCase(); - } - String userName = null; - if (checkTablesWithUserName) { - userName = metaData.getUserName(); - } - rs = metaData.getTables(null, userName, name, null); - schemaExists = rs.next(); - } finally { - DbUtility.close(con, null, rs); - } - return schemaExists; - } - - /** - * Starts the batch mode. If an {@link SQLException} is thrown, then the batch mode is not started.

- * Important: clients that call this method must make sure that - * {@link #endBatch(boolean)} is called eventually. - * - * @throws SQLException on error - */ - public final void startBatch() throws SQLException { - if (inBatchMode()) { - throw new SQLException("already in batch mode"); - } - Connection batchConnection = null; - try { - batchConnection = getConnection(false); - batchConnection.setAutoCommit(false); - setTransactionAwareBatchConnection(batchConnection); - } catch (SQLException e) { - removeTransactionAwareBatchConnection(); - // Strive for failure atomicity - if (batchConnection != null) { - DbUtility.close(batchConnection, null, null); - } - throw e; - } - } - - /** - * This method always ends the batch mode. - * - * @param commit whether the changes in the batch should be committed or rolled back - * @throws SQLException if the commit or rollback of the underlying JDBC Connection threw an {@code - * SQLException} - */ - public final void endBatch(boolean commit) throws SQLException { - if (!inBatchMode()) { - throw new SQLException("not in batch mode"); - } - Connection batchConnection = getTransactionAwareBatchConnection(); - try { - if (commit) { - batchConnection.commit(); - } else { - batchConnection.rollback(); - } - } finally { - removeTransactionAwareBatchConnection(); - if (batchConnection != null) { - DbUtility.close(batchConnection, null, null); - } - } - } - - /** - * Executes a general SQL statement and immediately closes all resources. - * - * Note: We use a Statement if there are no parameters to avoid a problem on - * the Oracle 10g JDBC driver w.r.t. :NEW and :OLD keywords that triggers ORA-17041. - * - * @param sql an SQL statement string - * @param params the parameters for the SQL statement - * @throws SQLException on error - */ - public final void exec(final String sql, final Object... 
params) throws SQLException { - new RetryManager(params) { - - @Override - protected Void call() throws SQLException { - reallyExec(sql, params); - return null; - } - - }.doTry(); - } - - void reallyExec(String sql, Object... params) throws SQLException { - Connection con = null; - Statement stmt = null; - boolean inBatchMode = inBatchMode(); - try { - con = getConnection(inBatchMode); - if (params == null || params.length == 0) { - stmt = con.createStatement(); - stmt.execute(sql); - } else { - PreparedStatement p = con.prepareStatement(sql); - stmt = p; - execute(p, params); - } - } finally { - closeResources(con, stmt, null, inBatchMode); - } - } - - /** - * Executes an update or delete statement and returns the update count. - * - * @param sql an SQL statement string - * @param params the parameters for the SQL statement - * @return the update count - * @throws SQLException on error - */ - public final int update(final String sql, final Object... params) throws SQLException { - return new RetryManager(params) { - - @Override - protected Integer call() throws SQLException { - return reallyUpdate(sql, params); - } - - }.doTry(); - } - - int reallyUpdate(String sql, Object... params) throws SQLException { - Connection con = null; - PreparedStatement stmt = null; - boolean inBatchMode = inBatchMode(); - try { - con = getConnection(inBatchMode); - stmt = con.prepareStatement(sql); - return execute(stmt, params).getUpdateCount(); - } finally { - closeResources(con, stmt, null, inBatchMode); - } - } - - /** - * Executes a SQL query and returns the {@link ResultSet}. The - * returned {@link ResultSet} should be closed by clients. - * - * @param sql an SQL statement string - * @param params the parameters for the SQL statement - * @return a {@link ResultSet} - */ - public final ResultSet query(String sql, Object... 
params) throws SQLException { - return exec(sql, params, false, 0); - } - - /** - * Executes a general SQL statement and returns the {@link ResultSet} of the executed statement. The - * returned {@link ResultSet} should be closed by clients. - * - * @param sql an SQL statement string - * @param params the parameters for the SQL statement - * @param returnGeneratedKeys whether generated keys should be returned - * @param maxRows the maximum number of rows in a potential {@link ResultSet} (0 means no limit) - * @return a {@link ResultSet} - * @throws SQLException on error - */ - public final ResultSet exec(final String sql, final Object[] params, final boolean returnGeneratedKeys, - final int maxRows) throws SQLException { - return new RetryManager(params) { - - @Override - protected ResultSet call() throws SQLException { - return reallyExec(sql, params, returnGeneratedKeys, maxRows); - } - - }.doTry(); - } - - ResultSet reallyExec(String sql, Object[] params, boolean returnGeneratedKeys, int maxRows) - throws SQLException { - Connection con = null; - PreparedStatement stmt = null; - ResultSet rs = null; - boolean inBatchMode = inBatchMode(); - try { - con = getConnection(inBatchMode); - if (returnGeneratedKeys) { - stmt = con.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS); - } else { - stmt = con.prepareStatement(sql); - } - stmt.setMaxRows(maxRows); - int currentFetchSize = this.fetchSize; - if (0 < maxRows && maxRows < currentFetchSize) { - currentFetchSize = maxRows; // JCR-3090 - } - stmt.setFetchSize(currentFetchSize); - execute(stmt, params); - if (returnGeneratedKeys) { - rs = stmt.getGeneratedKeys(); - } else { - rs = stmt.getResultSet(); - } - // Don't wrap null - if (rs == null) { - closeResources(con, stmt, rs, inBatchMode); - return null; - } - if (inBatchMode) { - return ResultSetWrapper.newInstance(null, stmt, rs); - } else { - return ResultSetWrapper.newInstance(con, stmt, rs); - } - } catch (SQLException e) { - closeResources(con, stmt, rs, 
inBatchMode); - throw e; - } - } - - /** - * Gets a connection based on the {@code batchMode} state of this helper. The connection should be closed - * by a call to {@link #closeResources(Connection, Statement, ResultSet)} which also takes the {@code - * batchMode} state into account. - * - * @param inBatchMode indicates if we are in a batchMode - * @return a {@code Connection} to use, based on the batch mode state - * @throws SQLException on error - */ - protected final Connection getConnection(boolean inBatchMode) throws SQLException { - if (inBatchMode) { - return getTransactionAwareBatchConnection(); - } else { - Connection con = dataSource.getConnection(); - // JCR-1013: Setter may fail unnecessarily on a managed connection - if (!con.getAutoCommit()) { - con.setAutoCommit(true); - } - return con; - } - } - - /** - * Returns the Batch Connection. - * - * @return Connection - */ - private Connection getTransactionAwareBatchConnection() { - Object threadId = TransactionContext.getCurrentThreadId(); - return batchConnectionMap.get(threadId); - } - - /** - * Stores the given Connection to the batchConnectionMap. - * If we are running in a XA Environment the globalTransactionId will be used as Key. - * In Non-XA Environment the ThreadName is used. - * - * @param batchConnection - */ - private void setTransactionAwareBatchConnection(Connection batchConnection) { - Object threadId = TransactionContext.getCurrentThreadId(); - batchConnectionMap.put(threadId, batchConnection); - } - - /** - * Removes the Batch Connection from the batchConnectionMap - */ - private void removeTransactionAwareBatchConnection() { - Object threadId = TransactionContext.getCurrentThreadId(); - batchConnectionMap.remove(threadId); - } - - /** - * Closes the given resources given the {@code batchMode} state. 
- * - * @param con the {@code Connection} obtained through the {@link #getConnection()} method - * @param stmt a {@code Statement} - * @param rs a {@code ResultSet} - * @param inBatchMode indicates if we are in a batchMode - */ - protected final void closeResources(Connection con, Statement stmt, ResultSet rs, boolean inBatchMode) { - if (inBatchMode) { - DbUtility.close(null, stmt, rs); - } else { - DbUtility.close(con, stmt, rs); - } - } - - /** - * This method is used by all methods of this class that execute SQL statements. This default - * implementation sets all parameters and unwraps {@link StreamWrapper} instances. Subclasses may override - * this method to do something special with the parameters. E.g., the {@link Oracle10R1ConnectionHelper} - * overrides it in order to add special blob handling. - * - * @param stmt the {@link PreparedStatement} to execute - * @param params the parameters - * @return the executed statement - * @throws SQLException on error - */ - protected PreparedStatement execute(PreparedStatement stmt, Object[] params) throws SQLException { - for (int i = 0; params != null && i < params.length; i++) { - Object p = params[i]; - if (p instanceof StreamWrapper) { - StreamWrapper wrapper = (StreamWrapper) p; - stmt.setBinaryStream(i + 1, wrapper.getStream(), (int) wrapper.getSize()); - } else { - stmt.setObject(i + 1, p); - } - } - try { - stmt.execute(); - } catch (SQLException e) { - //Reset Stream for retry ... - for (int i = 0; params != null && i < params.length; i++) { - Object p = params[i]; - if (p instanceof StreamWrapper) { - StreamWrapper wrapper = (StreamWrapper) p; - if(!wrapper.resetStream()) { - wrapper.cleanupResources(); - throw new RuntimeException("Unable to reset the Stream."); - } - } - } - throw e; - } - return stmt; - } - - /** - * This class encapsulates the logic to retry a method invocation if it threw an SQLException. - * The RetryManager must cleanup the Params it will get. 
- * - * @param the return type of the method which is retried if it failed - */ - public abstract class RetryManager { - - private Object[] params; - - public RetryManager(Object[] params) { - this.params = params; - } - - public final T doTry() throws SQLException { - if (inBatchMode()) { - return call(); - } else { - boolean sleepInterrupted = false; - int failures = 0; - SQLException lastException = null; - while (!sleepInterrupted && (blockOnConnectionLoss || failures <= RETRIES)) { - try { - T object = call(); - cleanupParamResources(); - return object; - } catch (SQLException e) { - lastException = e; - } - log.error("Failed to execute SQL (stacktrace on DEBUG log level): " + lastException); - log.debug("Failed to execute SQL", lastException); - failures++; - if (blockOnConnectionLoss || failures <= RETRIES) { // if we're going to try again - try { - Thread.sleep(SLEEP_BETWEEN_RETRIES_MS); - } catch (InterruptedException e1) { - Thread.currentThread().interrupt(); - sleepInterrupted = true; - log.error("Interrupted: canceling retry"); - } - } - } - cleanupParamResources(); - throw lastException; - } - } - - protected abstract T call() throws SQLException; - - /** - * Cleans up the Parameter resources that are not automatically closed or deleted. 
- * - * @param params - */ - protected void cleanupParamResources() { - for (int i = 0; params != null && i < params.length; i++) { - Object p = params[i]; - if (p instanceof StreamWrapper) { - StreamWrapper wrapper = (StreamWrapper) p; - wrapper.cleanupResources(); - } - } - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java (working copy) @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -/** - * Bean components (i.e., classes that appear in the repository descriptor) that implement this interface will - * get the repositories {@link ConnectionFactory} instance injected just after construction and before - * initialization. 
- */ -public interface DatabaseAware { - - /** - * @param connectionFactory - */ - void setConnectionFactory(ConnectionFactory connectionFactory); -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java (working copy) @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.io.PrintWriter; -import java.sql.Connection; -import java.sql.SQLException; -import java.util.logging.Logger; - -import javax.sql.DataSource; - -/** - * This class delegates all calls to the corresponding method on the wrapped {@code DataSource} except for the {@link #getConnection()} method, - * which delegates to {@code DataSource#getConnection(String, String)} with the username and password - * which are given on construction. 
- */ -public class DataSourceWrapper implements DataSource { - - private final DataSource dataSource; - - private final String username; - - private final String password; - - /** - * @param dataSource the {@code DataSource} to wrap - * @param username the username to use - * @param password the password to use - */ - public DataSourceWrapper(DataSource dataSource, String username, String password) { - this.dataSource = dataSource; - this.username = username; - this.password = password; - } - - /** - * Java 6 method. - * - * {@inheritDoc} - */ - public boolean isWrapperFor(Class arg0) throws SQLException { - throw new UnsupportedOperationException("Java 6 method not supported"); - } - - /** - * Java 6 method. - * - * {@inheritDoc} - */ - public T unwrap(Class arg0) throws SQLException { - throw new UnsupportedOperationException("Java 6 method not supported"); - } - - /** - * Unsupported Java 7 method. - * - * @see JCR-3167 - */ - public Logger getParentLogger() { - throw new UnsupportedOperationException("Java 7 method not supported"); - } - - /** - * {@inheritDoc} - */ - public Connection getConnection() throws SQLException { - return dataSource.getConnection(username, password); - } - - /** - * {@inheritDoc} - */ - public Connection getConnection(String username, String password) throws SQLException { - return dataSource.getConnection(username, password); - } - - /** - * {@inheritDoc} - */ - public PrintWriter getLogWriter() throws SQLException { - return dataSource.getLogWriter(); - } - - /** - * {@inheritDoc} - */ - public int getLoginTimeout() throws SQLException { - return dataSource.getLoginTimeout(); - } - - /** - * {@inheritDoc} - */ - public void setLogWriter(PrintWriter out) throws SQLException { - dataSource.setLogWriter(out); - } - - /** - * {@inheritDoc} - */ - public void setLoginTimeout(int seconds) throws SQLException { - dataSource.setLoginTimeout(seconds); - } - -} Index: 
jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java (working copy) @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class contains some database utility methods. - */ -public final class DbUtility { - - private static final Logger LOG = LoggerFactory.getLogger(DbUtility.class); - - /** - * Private constructor for utility class pattern. - */ - private DbUtility() { - } - - /** - * This is a utility method which closes the given resources without throwing exceptions. Any exceptions - * encountered are logged instead. 
- * - * @param rs the {@link ResultSet} to close, may be null - */ - public static void close(ResultSet rs) { - close(null, null, rs); - } - - /** - * This is a utility method which closes the given resources without throwing exceptions. Any exceptions - * encountered are logged instead. - * - * @param con the {@link Connection} to close, may be null - * @param stmt the {@link Statement} to close, may be null - * @param rs the {@link ResultSet} to close, may be null - */ - public static void close(Connection con, Statement stmt, ResultSet rs) { - try { - if (rs != null) { - rs.close(); - } - } catch (SQLException e) { - logException("failed to close ResultSet", e); - } finally { - try { - if (stmt != null) { - stmt.close(); - } - } catch (SQLException e) { - logException("failed to close Statement", e); - } finally { - try { - if (con != null && !con.isClosed()) { - con.close(); - } - } catch (SQLException e) { - logException("failed to close Connection", e); - } - } - } - } - - /** - * Logs an SQL exception on error level, and debug level (more detail). - * - * @param message the message - * @param e the exception - */ - public static void logException(String message, SQLException e) { - if (message != null) { - LOG.error(message); - } - LOG.error(" Reason: " + e.getMessage()); - LOG.error(" State/Code: " + e.getSQLState() + "/" + e.getErrorCode()); - LOG.debug(" dump:", e); - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java (working copy) @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * - */ -public final class DerbyConnectionHelper extends ConnectionHelper { - - /** name of the embedded driver */ - public static final String DERBY_EMBEDDED_DRIVER = "org.apache.derby.jdbc.EmbeddedDriver"; - - private static Logger log = LoggerFactory.getLogger(DerbyConnectionHelper.class); - - /** - * @param dataSrc the {@code DataSource} on which this helper acts - * @param block whether to block on connection loss until the db is up again - */ - public DerbyConnectionHelper(DataSource dataSrc, boolean block) { - super(dataSrc, block); - } - - /** - * Shuts the embedded Derby database down. 
- * - * @param driver the driver - * @throws SQLException on failure - */ - public void shutDown(String driver) throws SQLException { - // check for embedded driver - if (!DERBY_EMBEDDED_DRIVER.equals(driver)) { - return; - } - - // prepare connection url for issuing shutdown command - String url = null; - Connection con = null; - - try { - con = dataSource.getConnection(); - try { - url = con.getMetaData().getURL(); - } catch (SQLException e) { - // JCR-1557: embedded derby db probably already shut down; - // this happens when configuring multiple FS/PM instances - // to use the same embedded derby db instance. - log.debug("failed to retrieve connection url: embedded db probably already shut down", e); - return; - } - // we have to reset the connection to 'autoCommit=true' before closing it; - // otherwise Derby would mysteriously complain about some pending uncommitted - // changes which can't possibly be true. - // @todo further investigate - con.setAutoCommit(true); - } - finally { - DbUtility.close(con, null, null); - } - int pos = url.lastIndexOf(';'); - if (pos != -1) { - // strip any attributes from connection url - url = url.substring(0, pos); - } - url += ";shutdown=true"; - - // now it's safe to shutdown the embedded Derby database - try { - DriverManager.getConnection(url); - } catch (SQLException e) { - // a shutdown command always raises a SQLException - log.info(e.getMessage()); - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java (working copy) @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.lang.reflect.Method; -import java.sql.Blob; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import javax.sql.DataSource; - -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The connection helper for Oracle databases of version up to 10.1. It has special blob handling. 
- */ -public final class Oracle10R1ConnectionHelper extends OracleConnectionHelper { - - /** - * the default logger - */ - private static Logger log = LoggerFactory.getLogger(Oracle10R1ConnectionHelper.class); - - private Class blobClass; - - private Integer durationSessionConstant; - - private Integer modeReadWriteConstant; - - /** - * @param dataSrc the {@code DataSource} on which this helper acts - * @param block whether to block on connection loss until the db is up again - */ - public Oracle10R1ConnectionHelper(DataSource dataSrc, boolean block) { - super(dataSrc, block); - } - - /** - * Retrieve the oracle.sql.BLOB class via reflection, and initialize the values for the - * DURATION_SESSION and MODE_READWRITE constants defined there. - * - * @see oracle.sql.BLOB#DURATION_SESSION - * @see oracle.sql.BLOB#MODE_READWRITE - */ - @Override - public void init() throws Exception { - super.init(); - // initialize oracle.sql.BLOB class & constants - - // use the Connection object for using the exact same - // class loader that the Oracle driver was loaded with - Connection con = null; - try { - con = dataSource.getConnection(); - blobClass = con.getClass().getClassLoader().loadClass("oracle.sql.BLOB"); - durationSessionConstant = new Integer(blobClass.getField("DURATION_SESSION").getInt(null)); - modeReadWriteConstant = new Integer(blobClass.getField("MODE_READWRITE").getInt(null)); - } finally { - if (con != null) { - DbUtility.close(con, null, null); - } - } - } - - /** - * Wraps any input-stream parameters in temporary blobs and frees these again after the statement - * has been executed. 
- * - * {@inheritDoc} - */ - @Override - protected PreparedStatement execute(PreparedStatement stmt, Object[] params) throws SQLException { - List tmpBlobs = new ArrayList(); - try { - for (int i = 0; params != null && i < params.length; i++) { - Object p = params[i]; - if (p instanceof StreamWrapper) { - StreamWrapper wrapper = (StreamWrapper) p; - Blob tmp = createTemporaryBlob(stmt.getConnection(), wrapper.getStream()); - tmpBlobs.add(tmp); - stmt.setBlob(i + 1, tmp); - } else if (p instanceof InputStream) { - Blob tmp = createTemporaryBlob(stmt.getConnection(), (InputStream) p); - tmpBlobs.add(tmp); - stmt.setBlob(i + 1, tmp); - } else { - stmt.setObject(i + 1, p); - } - } - stmt.execute(); - return stmt; - } catch (Exception e) { - throw new SQLException(e.getMessage()); - } finally { - for (Blob blob : tmpBlobs) { - try { - freeTemporaryBlob(blob); - } catch (Exception e) { - log.warn("Could not close temporary blob", e); - } - } - } - } - - /** - * Creates a temporary oracle.sql.BLOB instance via reflection and spools the contents of the specified - * stream. - */ - private Blob createTemporaryBlob(Connection con, InputStream in) throws Exception { - /* - * BLOB blob = BLOB.createTemporary(con, false, BLOB.DURATION_SESSION); - * blob.open(BLOB.MODE_READWRITE); OutputStream out = blob.getBinaryOutputStream(); ... 
out.flush(); - * out.close(); blob.close(); return blob; - */ - Method createTemporary = - blobClass.getMethod("createTemporary", new Class[]{Connection.class, Boolean.TYPE, Integer.TYPE}); - Object blob = - createTemporary.invoke(null, new Object[]{ConnectionFactory.unwrap(con), Boolean.FALSE, - durationSessionConstant}); - Method open = blobClass.getMethod("open", new Class[]{Integer.TYPE}); - open.invoke(blob, new Object[]{modeReadWriteConstant}); - Method getBinaryOutputStream = blobClass.getMethod("getBinaryOutputStream", new Class[0]); - OutputStream out = (OutputStream) getBinaryOutputStream.invoke(blob); - try { - IOUtils.copy(in, out); - } finally { - try { - out.flush(); - } catch (IOException ioe) { - } - out.close(); - } - Method close = blobClass.getMethod("close", new Class[0]); - close.invoke(blob); - return (Blob) blob; - } - - /** - * Frees a temporary oracle.sql.BLOB instance via reflection. - */ - private void freeTemporaryBlob(Blob blob) throws Exception { - // blob.freeTemporary(); - Method freeTemporary = blobClass.getMethod("freeTemporary", new Class[0]); - freeTemporary.invoke(blob); - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java (working copy) @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The connection helper for Oracle databases of version 10.2 and later. - */ -public class OracleConnectionHelper extends ConnectionHelper { - - /** - * the default logger - */ - private static Logger log = LoggerFactory.getLogger(OracleConnectionHelper.class); - - /** - * @param dataSrc the {@code DataSource} on which this helper acts - * @param block whether to block on connection loss until the db is up again - */ - public OracleConnectionHelper(DataSource dataSrc, boolean block) { - super(dataSrc, true, block); - } - - /** - * Initializes the helper: checks for valid driver version. - * Subclasses that override this method should still call it! - * - * @throws Exception on error - */ - public void init() throws Exception { - // check driver version - Connection connection = dataSource.getConnection(); - try { - DatabaseMetaData metaData = connection.getMetaData(); - if (metaData.getDriverMajorVersion() < 10) { - // Oracle drivers prior to version 10 only support - // writing BLOBs up to 32k in size... 
- log.warn("Unsupported driver version detected: " - + metaData.getDriverName() - + " v" + metaData.getDriverVersion()); - } - } catch (SQLException e) { - log.warn("Can not retrieve driver version", e); - } finally { - DbUtility.close(connection, null, null); - } - } - - /** - * Since Oracle only supports table names up to 30 characters in - * length illegal characters are simply replaced with "_" rather than - * escaping them with "_x0000_". - * - * {@inheritDoc} - */ - @Override - protected final void replaceCharacter(StringBuilder escaped, char c) { - escaped.append("_"); - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java (working copy) @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import javax.sql.DataSource; - -/** - * The connection helper for PSQL databases. It has special fetch size handling. 
- */ -public final class PostgreSQLConnectionHelper extends ConnectionHelper { - - /** - * @param dataSrc the {@code DataSource} on which this helper acts - * @param block whether to block on connection loss until the db is up again - */ - public PostgreSQLConnectionHelper(DataSource dataSrc, boolean block) { - super(dataSrc, false, block, 10000); - } - -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java (working copy) @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; - -/** - * This is a dynamic proxy in order to support both Java 5 and 6. 
- */ -public final class ResultSetWrapper implements InvocationHandler { - - private final Connection connection; - - private final Statement statement; - - private final ResultSet resultSet; - - /** - * Creates a new {@code ResultSet} proxy which closes the given {@code Connection} and - * {@code Statement} if it is closed. - * - * @param con the associated {@code Connection} - * @param stmt the associated {@code Statement} - * @param rs the {@code ResultSet} which backs the proxy - * @return a {@code ResultSet} proxy - */ - public static final ResultSet newInstance(Connection con, Statement stmt, ResultSet rs) { - ResultSetWrapper proxy = new ResultSetWrapper(con, stmt, rs); - return (ResultSet) Proxy.newProxyInstance(rs.getClass().getClassLoader(), - new Class[]{ResultSet.class}, proxy); - } - - private ResultSetWrapper(Connection con, Statement stmt, ResultSet rs) { - connection = con; - statement = stmt; - resultSet = rs; - } - - /** - * {@inheritDoc} - */ - public Object invoke(Object proxy, Method m, Object[] args) throws Throwable { - if ("close".equals(m.getName())) { - DbUtility.close(connection, statement, resultSet); - return null; - } else { - return m.invoke(resultSet, args); - } - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java (working copy) @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.core.util.db; - -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.sql.SQLException; - -import org.apache.jackrabbit.core.data.db.TempFileInputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class StreamWrapper { - - static Logger log = LoggerFactory.getLogger(StreamWrapper.class); - - private InputStream stream; - private final long size; - - /** - * Creates a wrapper for the given InputStream that can - * safely be passed as a parameter to the {@link ConnectionHelper#exec(String, Object...)}, - * {@link ConnectionHelper#exec(String, Object[], boolean, int)} and - * {@link ConnectionHelper#update(String, Object[])} methods. - * If the wrapped Stream is a {@link TempFileInputStream} it will be wrapped again by a {@link BufferedInputStream}. 
- * - * @param in the InputStream to wrap - * @param size the size of the input stream - */ - public StreamWrapper(InputStream in, long size) { - this.stream = in; - this.size = size; - } - - public InputStream getStream() { - if (stream instanceof TempFileInputStream) { - return new BufferedInputStream(stream); - } - return stream; - } - - public long getSize() { - return size; - } - - /** - * Cleans up the internal Resources - */ - public void cleanupResources() { - if (stream instanceof TempFileInputStream) { - try { - stream.close(); - ((TempFileInputStream) stream).deleteFile(); - } catch (IOException e) { - log.warn("Unable to cleanup the TempFileInputStream"); - } - } - } - - /** - * Resets the internal InputStream that it could be re-read.
- * Is used from {@link RetryManager} if a {@link SQLException} has occurred.
- * At the moment only a {@link TempFileInputStream} can be reseted. - * - * @return returns true if it was able to reset the Stream - */ - public boolean resetStream() { - if (stream instanceof TempFileInputStream) { - try { - TempFileInputStream tempFileInputStream = (TempFileInputStream) stream; - // Close it if it is not already closed ... - tempFileInputStream.close(); - stream = new TempFileInputStream(tempFileInputStream.getFile(), true); - return true; - } catch (Exception e) { - log.warn("Failed to create a new TempFileInputStream", e); - } - } - return false; - } -} Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantLock.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantLock.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantLock.java (working copy) @@ -16,8 +16,10 @@ */ package org.apache.jackrabbit.core.util; -import static org.apache.jackrabbit.core.TransactionContext.isSameThreadId; -import org.apache.jackrabbit.core.TransactionContext; +import static org.apache.jackrabbit.data.core.TransactionContext.isSameThreadId; + +import org.apache.jackrabbit.data.core.TransactionContext; + import EDU.oswego.cs.dl.util.concurrent.ReentrantLock; /** Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantWriterPreferenceReadWriteLock.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantWriterPreferenceReadWriteLock.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/util/XAReentrantWriterPreferenceReadWriteLock.java (working copy) @@ -16,8 +16,8 @@ */ package org.apache.jackrabbit.core.util; -import static org.apache.jackrabbit.core.TransactionContext.getCurrentThreadId; -import static 
org.apache.jackrabbit.core.TransactionContext.isSameThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.getCurrentThreadId; +import static org.apache.jackrabbit.data.core.TransactionContext.isSameThreadId; import EDU.oswego.cs.dl.util.concurrent.ReentrantWriterPreferenceReadWriteLock; /** Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/version/InternalXAVersionManager.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/version/InternalXAVersionManager.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/version/InternalXAVersionManager.java (working copy) @@ -24,10 +24,7 @@ import javax.jcr.Session; import javax.jcr.version.VersionException; -import org.apache.jackrabbit.core.InternalXAResource; import org.apache.jackrabbit.core.SessionImpl; -import org.apache.jackrabbit.core.TransactionContext; -import org.apache.jackrabbit.core.TransactionException; import org.apache.jackrabbit.core.id.ItemId; import org.apache.jackrabbit.core.id.NodeId; import org.apache.jackrabbit.core.nodetype.NodeTypeRegistry; @@ -45,6 +42,9 @@ import org.apache.jackrabbit.core.virtual.VirtualItemStateProvider; import org.apache.jackrabbit.core.virtual.VirtualNodeState; import org.apache.jackrabbit.core.virtual.VirtualPropertyState; +import org.apache.jackrabbit.data.core.InternalXAResource; +import org.apache.jackrabbit.data.core.TransactionContext; +import org.apache.jackrabbit.data.core.TransactionException; import org.apache.jackrabbit.spi.Name; import org.apache.jackrabbit.spi.commons.name.NameConstants; Index: jackrabbit-core/src/main/java/org/apache/jackrabbit/core/XASessionImpl.java =================================================================== --- jackrabbit-core/src/main/java/org/apache/jackrabbit/core/XASessionImpl.java (revision 1564627) +++ jackrabbit-core/src/main/java/org/apache/jackrabbit/core/XASessionImpl.java (working 
copy) @@ -23,6 +23,8 @@ import org.apache.jackrabbit.core.state.XAItemStateManager; import org.apache.jackrabbit.core.version.InternalVersionManager; import org.apache.jackrabbit.core.version.InternalXAVersionManager; +import org.apache.jackrabbit.data.core.InternalXAResource; +import org.apache.jackrabbit.data.core.TransactionContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties (working copy) @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -driver=com.microsoft.sqlserver.jdbc.SQLServerDriver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties (working copy) @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -driver=COM.ibm.db2.jdbc.net.DB2Driver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY NOT NULL, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB(1000M)) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties (working copy) @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Tested with Apache Derby 10.3.1.4 on Windows XP (2007-12-11) -driver=org.apache.derby.jdbc.EmbeddedDriver \ No newline at end of file Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties (working copy) @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Tested with H2 1.0.63 on Windows XP (2007-12-11) -driver=org.h2.Driver -storeStream=-1 \ No newline at end of file Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties (working copy) @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -driver=com.ingres.jdbc.IngresDriver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY NOT NULL, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA LONG BYTE) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties (working copy) @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -driver=com.microsoft.sqlserver.jdbc.SQLServerDriver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties (working copy) @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Tested with MySQL 5.0.27-community-nt on Windows XP (2007-12-11) -# currently, the objects must fit in memory -driver=com.mysql.jdbc.Driver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB(2147483647)) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties (working copy) @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Tested with Oracle Database 10g Release 10.2.0.1.0 on Windows XP (2008-04-29) -driver=oracle.jdbc.OracleDriver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH NUMBER, LAST_MODIFIED NUMBER, DATA BLOB) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties (working copy) @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Tested with PostgreSQL 8.2.4 on Windows XP (2007-12-11) -# currently, the objects must fit in memory -driver=org.postgresql.Driver -table=datastore -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BYTEA) Index: jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties =================================================================== --- jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties (revision 1564627) +++ jackrabbit-core/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties (working copy) @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Tested with Microsoft SQL Server 2005 4 on Windows XP (2007-12-11) -driver=com.microsoft.sqlserver.jdbc.SQLServerDriver -createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GarbageCollectorTest.java =================================================================== --- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GarbageCollectorTest.java (revision 1564627) +++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GarbageCollectorTest.java (working copy) @@ -19,6 +19,7 @@ import org.apache.jackrabbit.api.management.DataStoreGarbageCollector; import org.apache.jackrabbit.api.management.MarkEventListener; import org.apache.jackrabbit.core.SessionImpl; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.apache.jackrabbit.test.AbstractJCRTest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCEventListenerTest.java =================================================================== --- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCEventListenerTest.java (revision 1564627) +++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCEventListenerTest.java (working copy) @@ -19,6 +19,7 @@ import org.apache.jackrabbit.api.management.DataStoreGarbageCollector; import org.apache.jackrabbit.api.management.MarkEventListener; import org.apache.jackrabbit.core.SessionImpl; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.apache.jackrabbit.test.AbstractJCRTest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCSubtreeMoveTest.java =================================================================== --- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCSubtreeMoveTest.java (revision 
1564627) +++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCSubtreeMoveTest.java (working copy) @@ -35,6 +35,7 @@ import org.apache.jackrabbit.api.management.MarkEventListener; import org.apache.jackrabbit.core.RepositoryFactoryImpl; import org.apache.jackrabbit.core.SessionImpl; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCThread.java =================================================================== --- jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCThread.java (revision 1564627) +++ jackrabbit-core/src/test/java/org/apache/jackrabbit/core/data/GCThread.java (working copy) @@ -19,6 +19,7 @@ import org.apache.jackrabbit.api.management.DataStoreGarbageCollector; import org.apache.jackrabbit.api.management.MarkEventListener; import org.apache.jackrabbit.core.SessionImpl; +import org.apache.jackrabbit.core.gc.GarbageCollector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; Index: jackrabbit-data/pom.xml =================================================================== --- jackrabbit-data/pom.xml (revision 0) +++ jackrabbit-data/pom.xml (working copy) @@ -0,0 +1,92 @@ + + + + + + 4.0.0 + + + + + + org.apache.jackrabbit + jackrabbit-parent + 2.8-SNAPSHOT + ../jackrabbit-parent/pom.xml + + jackrabbit-data + Jackrabbit Data + Jackrabbit DataStore Implentations + bundle + + + + + org.apache.felix + maven-bundle-plugin + true + + + org.apache.rat + apache-rat-plugin + + + .checkstyle + + + + + + + + + + + + javax.jcr + jcr + + + org.apache.jackrabbit + jackrabbit-api + ${project.version} + + + org.apache.jackrabbit + jackrabbit-jcr-commons + ${project.version} + + + commons-collections + commons-collections + + + commons-io + commons-io + + + commons-dbcp + commons-dbcp + 1.3 + + + org.apache.derby + derby + + + org.slf4j + jcl-over-slf4j + + + + \ No newline at end of file Index: 
jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/ConfigurationException.java (working copy) @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.config; + +import javax.jcr.RepositoryException; + +/** + * Exception class used for configuration errors. + */ +public class ConfigurationException extends RepositoryException { + + /** + * Creates a configuration exception. + * + * @param message configuration message + */ + public ConfigurationException(String message) { + super(message); + } + + /** + * Creates a configuration exception that is caused by another exception. 
+ * + * @param message configuration error message + * @param cause root cause of the configuration error + */ + public ConfigurationException(String message, Exception cause) { + super(message, cause); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/config/DataSourceConfig.java (working copy) @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.config; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Properties; + +import javax.naming.Context; + +/** + * This class contains list of definitions for {@code DataSource} instances. 
+ */ +public class DataSourceConfig { + + public static final String DRIVER = "driver"; + + public static final String URL = "url"; + + public static final String USER = "user"; + + public static final String PASSWORD = "password"; + + public static final String DB_TYPE = "databaseType"; + + public static final String VALIDATION_QUERY = "validationQuery"; + + public static final String MAX_POOL_SIZE = "maxPoolSize"; + + private final List defs = new ArrayList(); + + /** + * Adds a DataSourceDefinition from the given properties. + * + * @param props the properties (key and values must be strings) + * @throws ConfigurationException on error + */ + public void addDataSourceDefinition(String name, Properties props) throws ConfigurationException { + DataSourceDefinition def = new DataSourceDefinition(name, props); + for (DataSourceDefinition existing : defs) { + if (existing.getLogicalName().equals(def.getLogicalName())) { + throw new ConfigurationException("Duplicate logicalName for a DataSource: " + + def.getLogicalName()); + } + } + defs.add(def); + } + + /** + * @return the unmodifiable list of the current {@link DataSourceDefinition}s + */ + public List getDefinitions() { + return Collections.unmodifiableList(defs); + } + + /** + * The definition of a DataSource. 
+ */ + public static final class DataSourceDefinition { + + private static final List allPropNames = + Arrays.asList(DRIVER, URL, USER, PASSWORD, DB_TYPE, VALIDATION_QUERY, MAX_POOL_SIZE); + + private static final List allJndiPropNames = + Arrays.asList(DRIVER, URL, USER, PASSWORD, DB_TYPE); + + private final String logicalName; + + private final String driver; + + private final String url; + + private final String user; + + private final String password; + + private final String dbType; + + private final String validationQuery; + + private final int maxPoolSize; + + /** + * Creates a DataSourceDefinition from the given properties and + * throws a {@link ConfigurationException} when the set of properties does not + * satisfy some validity constraints. + * + * @param name the logical name of the data source + * @param props the properties (string keys and values) + * @throws ConfigurationException on error + */ + public DataSourceDefinition(String name, Properties props) throws ConfigurationException { + this.logicalName = name; + this.driver = (String) props.getProperty(DRIVER); + this.url = (String) props.getProperty(URL); + this.user = (String) props.getProperty(USER); + this.password = (String) props.getProperty(PASSWORD); + this.dbType = (String) props.getProperty(DB_TYPE); + this.validationQuery = (String) props.getProperty(VALIDATION_QUERY); + try { + this.maxPoolSize = Integer.parseInt((String) props.getProperty(MAX_POOL_SIZE, "-1")); + } catch (NumberFormatException e) { + throw new ConfigurationException("failed to parse " + MAX_POOL_SIZE + + " property for DataSource " + logicalName); + } + verify(props); + } + + private void verify(Properties props) throws ConfigurationException { + // Check required properties + if (logicalName == null || "".equals(logicalName.trim())) { + throw new ConfigurationException("DataSource logical name must not be null or empty"); + } + if (driver == null || "".equals(driver)) { + throw new ConfigurationException("DataSource 
driver must not be null or empty"); + } + if (url == null || "".equals(url)) { + throw new ConfigurationException("DataSource URL must not be null or empty"); + } + if (dbType == null || "".equals(dbType)) { + throw new ConfigurationException("DataSource databaseType must not be null or empty"); + } + // Check unknown properties + for (Object propName : props.keySet()) { + if (!allPropNames.contains((String) propName)) { + throw new ConfigurationException("Unknown DataSource property: " + propName); + } + } + // Check JNDI config: + if (isJndiConfig()) { + for (Object propName : props.keySet()) { + if (!allJndiPropNames.contains((String) propName)) { + throw new ConfigurationException("Property " + propName + + " is not allowed for a DataSource obtained through JNDI" + + ", DataSource logicalName = " + logicalName); + } + } + } + } + + private boolean isJndiConfig() throws ConfigurationException { + Class driverClass = null; + try { + if (driver.length() > 0) { + driverClass = Class.forName(driver); + } + } catch (ClassNotFoundException e) { + throw new ConfigurationException("Could not load JDBC driver class " + driver, e); + } + return driverClass != null && Context.class.isAssignableFrom(driverClass); + } + + /** + * @return the logicalName + */ + public String getLogicalName() { + return logicalName; + } + + /** + * @return the driver + */ + public String getDriver() { + return driver; + } + + /** + * @return the url + */ + public String getUrl() { + return url; + } + + /** + * @return the user + */ + public String getUser() { + return user; + } + + /** + * @return the dbType + */ + public String getDbType() { + return dbType; + } + + /** + * @return the password + */ + public String getPassword() { + return password; + } + + /** + * @return the validationQuery + */ + public String getValidationQuery() { + return validationQuery; + } + + /** + * @return the maxPoolSize + */ + public int getMaxPoolSize() { + return maxPoolSize; + } + } +} Index: 
jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataRecord.java (working copy) @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.core.data; + + +/** + * Abstract data record base class. This base class contains only + * a reference to the data identifier of the record and implements + * the standard {@link Object} equality, hash code, and string + * representation methods based on the identifier. + */ +public abstract class AbstractDataRecord implements DataRecord { + + /** + * The data store that contains this record. + */ + private final AbstractDataStore store; + + /** + * The binary identifier; + */ + private final DataIdentifier identifier; + + /** + * Creates a data record with the given identifier. 
+ * + * @param identifier data identifier + */ + public AbstractDataRecord( + AbstractDataStore store, DataIdentifier identifier) { + this.store = store; + this.identifier = identifier; + } + + /** + * Returns the data identifier. + * + * @return data identifier + */ + public DataIdentifier getIdentifier() { + return identifier; + } + + public String getReference() { + return store.getReferenceFromIdentifier(identifier); + } + + /** + * Returns the string representation of the data identifier. + * + * @return string representation + */ + public String toString() { + return identifier.toString(); + } + + /** + * Checks if the given object is a data record with the same identifier + * as this one. + * + * @param object other object + * @return true if the other object is a data record and has + * the same identifier as this one, false otherwise + */ + public boolean equals(Object object) { + return (object instanceof DataRecord) + && identifier.equals(((DataRecord) object).getIdentifier()); + } + + /** + * Returns the hash code of the data identifier. + * + * @return hash code + */ + public int hashCode() { + return identifier.hashCode(); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/AbstractDataStore.java (working copy) @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import java.security.SecureRandom; +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + + +public abstract class AbstractDataStore implements DataStore { + + private static final String ALGORITHM = "HmacSHA1"; + + /** + * Array of hexadecimal digits. + */ + private static final char[] HEX = "0123456789abcdef".toCharArray(); + + /** + * Cached copy of the reference key of this data store. Initialized in + * {@link #getReferenceKey()} when the key is first accessed. + */ + private byte[] referenceKey = null; + + //---------------------------------------------------------< DataStore >-- + + public DataRecord getRecord(DataIdentifier identifier) + throws DataStoreException { + DataRecord record = getRecordIfStored(identifier); + if (record != null) { + return record; + } else { + throw new DataStoreException( + "Record " + identifier + " does not exist"); + } + } + + public DataRecord getRecordFromReference(String reference) + throws DataStoreException { + if (reference != null) { + int colon = reference.indexOf(':'); + if (colon != -1) { + DataIdentifier identifier = + new DataIdentifier(reference.substring(0, colon)); + if (reference.equals(getReferenceFromIdentifier(identifier))) { + return getRecordIfStored(identifier); + } + } + } + return null; + } + + //---------------------------------------------------------< protected >-- + + /** + * Returns the hex encoding of the given bytes. 
+ * + * @param value value to be encoded + * @return encoded value + */ + protected static String encodeHexString(byte[] value) { + char[] buffer = new char[value.length * 2]; + for (int i = 0; i < value.length; i++) { + buffer[2 * i] = HEX[(value[i] >> 4) & 0x0f]; + buffer[2 * i + 1] = HEX[value[i] & 0x0f]; + } + return new String(buffer); + } + + protected String getReferenceFromIdentifier(DataIdentifier identifier) { + try { + String id = identifier.toString(); + + Mac mac = Mac.getInstance(ALGORITHM); + mac.init(new SecretKeySpec(getReferenceKey(), ALGORITHM)); + byte[] hash = mac.doFinal(id.getBytes("UTF-8")); + + return id + ':' + encodeHexString(hash); + } catch (Exception e) { + // TODO: log a warning about this exception + } + return null; + } + + /** + * Returns the reference key of this data store. If one does not already + * exist, it is automatically created in an implementation-specific way. + * The default implementation simply creates a temporary random key that's + * valid only until the data store gets restarted. Subclasses can override + * and/or decorate this method to support a more persistent reference key. + *

+ * This method is called only once during the lifetime of a data store + * instance and the return value is cached in memory, so it's no problem + * if the implementation is slow. + * + * @return reference key + * @throws DataStoreException if the key is not available + */ + protected byte[] getOrCreateReferenceKey() throws DataStoreException { + byte[] referenceKeyValue = new byte[256]; + new SecureRandom().nextBytes(referenceKeyValue); + return referenceKeyValue; + } + + //-----------------------------------------------------------< private >-- + + /** + * Returns the reference key of this data store. Synchronized to + * control concurrent access to the cached {@link #referenceKey} value. + * + * @return reference key + * @throws DataStoreException if the key is not available + */ + private synchronized byte[] getReferenceKey() throws DataStoreException { + if (referenceKey == null) { + referenceKey = getOrCreateReferenceKey(); + } + return referenceKey; + } + +} \ No newline at end of file Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/Backend.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/Backend.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/Backend.java (working copy) @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.core.data; + +import java.io.File; +import java.io.InputStream; +import java.util.Iterator; +import java.util.List; + + + +/** + * The interface defines the backend which can be plugged into + * {@link CachingDataStore}. + */ +public interface Backend { + + /** + * This method initialize backend with the configuration. + * + * @param store {@link CachingDataStore} + * @param homeDir path of repository home dir. + * @param config path of config property file. + * @throws DataStoreException + */ + void init(CachingDataStore store, String homeDir, String config) + throws DataStoreException; + + /** + * Return inputstream of record identified by identifier. + * + * @param identifier identifier of record. + * @return inputstream of the record. + * @throws DataStoreException if record not found or any error. + */ + InputStream read(DataIdentifier identifier) throws DataStoreException; + + /** + * Return length of record identified by identifier. + * + * @param identifier identifier of record. + * @return length of the record. + * @throws DataStoreException if record not found or any error. + */ + long getLength(DataIdentifier identifier) throws DataStoreException; + + /** + * Return lastModified of record identified by identifier. + * + * @param identifier identifier of record. + * @return lastModified of the record. + * @throws DataStoreException if record not found or any error. 
+ */ + long getLastModified(DataIdentifier identifier) throws DataStoreException; + + /** + * Stores file to backend with identifier used as key. If key pre-exists, it + * updates the timestamp of the key. + * + * @param identifier key of the file + * @param file file that would be stored in backend. + * @throws DataStoreException for any error. + */ + void write(DataIdentifier identifier, File file) throws DataStoreException; + + /** + * Returns identifiers of all records that exists in backend. + * @return iterator consisting of all identifiers + * @throws DataStoreException + */ + Iterator getAllIdentifiers() throws DataStoreException; + + /** + * Update timestamp of record identified by identifier if minModifiedDate is + * greater than record's lastModified else no op. + * + * @throws DataStoreException if record not found. + */ + void touch(DataIdentifier identifier, long minModifiedDate) + throws DataStoreException; + /** + * This method check the existence of record in backend. + * @param identifier identifier to be checked. + * @return true if records exists else false. + * @throws DataStoreException + */ + boolean exists(DataIdentifier identifier) throws DataStoreException; + + /** + * Close backend and release resources like database connection if any. + * @throws DataStoreException + */ + void close() throws DataStoreException; + + /** + * Delete all records which are older than timestamp. + * @param timestamp + * @return list of identifiers which are deleted. + * @throws DataStoreException + */ + List deleteAllOlderThan(long timestamp) throws DataStoreException; + + /** + * Delete record identified by identifier. No-op if identifier not found. 
+ * @param identifier + * @throws DataStoreException + */ + void deleteRecord(DataIdentifier identifier) throws DataStoreException; +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataRecord.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataRecord.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataRecord.java (working copy) @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.core.data; + +import java.io.InputStream; + + +/** + * CachingDataRecord which stores reference to {@link CachingDataStore}. This + * class doesn't store any references to attributes but attributes are fetched + * on demand from {@link CachingDataStore}. 
+ */ +public class CachingDataRecord extends AbstractDataRecord { + + private final CachingDataStore store; + + public CachingDataRecord(CachingDataStore store, DataIdentifier identifier) { + super(store, identifier); + this.store = store; + } + + @Override + public long getLastModified() { + try { + return store.getLastModified(getIdentifier()); + } catch (DataStoreException dse) { + return 0; + } + } + + @Override + public long getLength() throws DataStoreException { + return store.getLength(getIdentifier()); + } + + @Override + public InputStream getStream() throws DataStoreException { + return store.getStream(getIdentifier()); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/CachingDataStore.java (working copy) @@ -0,0 +1,605 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.jackrabbit.core.data; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; +import java.lang.ref.WeakReference; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.WeakHashMap; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A caching data store that consists of {@link LocalCache} and {@link Backend}. + * {@link Backend} is single source of truth. All methods first try to fetch + * information from {@link LocalCache}. If record is not available in + * {@link LocalCache}, then it is fetched from {@link Backend} and saved to + * {@link LocalCache} for further access. This class is designed to work without + * {@link LocalCache} and then all information is fetched from {@link Backend}. + * To disable {@link LocalCache} set {@link #setCacheSize(long)} to 0. * + * Configuration: + * + *

+ * <DataStore class="org.apache.jackrabbit.aws.ext.ds.CachingDataStore">
+ * 
+ *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
+ *     <param name="{@link #setConfig(String) config}" value="${rep.home}/backend.properties"/>
+ *     <param name="{@link #setCacheSize(long) cacheSize}" value="68719476736"/>
+ *     <param name="{@link #setSecret(String) secret}" value="123456"/>
+ *     <param name="{@link #setCachePurgeTrigFactor(double) cachePurgeTrigFactor}" value="0.95d"/>
+ *     <param name="{@link #setCachePurgeResizeFactor(double) cachePurgeResizeFactor}" value="0.85d"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ * </DataStore>
+ */
+public abstract class CachingDataStore extends AbstractDataStore implements
+        MultiDataStoreAware {
+
+    /**
+     * Logger instance.
+     */
+    private static final Logger LOG = LoggerFactory.getLogger(CachingDataStore.class);
+
+    /**
+     * The digest algorithm used to uniquely identify records.
+     */
+    private static final String DIGEST = "SHA-1";
+
+    private static final String DS_STORE = ".DS_Store";
+
+    /**
+     * Name of the directory used for temporary files. Must be at least 3
+     * characters.
+     */
+    private static final String TMP = "tmp";
+
+    /**
+     * All data identifiers that are currently in use are in this set until they
+     * are garbage collected.
+     */
+    protected Map<DataIdentifier, WeakReference<DataIdentifier>> inUse =
+            Collections.synchronizedMap(new WeakHashMap<DataIdentifier, WeakReference<DataIdentifier>>());
+
+    protected Backend backend;
+
+    /**
+     * The minimum size of an object that should be stored in this data store.
+     */
+    private int minRecordLength = 16 * 1024;
+
+    private String path;
+
+    private File directory;
+
+    private File tmpDir;
+
+    private String secret;
+
+    /**
+     * The optional backend configuration.
+     */
+    private String config;
+
+    /**
+     * The minimum modified date. If a file is accessed (read or write) with a
+     * modified date older than this value, the modified date is updated to the
+     * current time.
+     */
+    private long minModifiedDate;
+
+    /**
+     * Cache purge trigger factor. Cache will undergo in auto-purge mode if
+     * cache current size is greater than cachePurgeTrigFactor * cacheSize
+     */
+    private double cachePurgeTrigFactor = 0.95d;
+
+    /**
+     * Cache resize factor. After auto-purge mode, cache current size would just
+     * greater than cachePurgeResizeFactor * cacheSize
+     */
+    private double cachePurgeResizeFactor = 0.85d;
+
+    /**
+     * The number of bytes in the cache. The default value is 64 GB.
+     */
+    private long cacheSize = 64L * 1024 * 1024 * 1024;
+
+    /**
+     * The local file system cache.
+     */
+    private LocalCache cache;
+
+    protected abstract Backend createBackend();
+
+    protected abstract String getMarkerFile();
+
+    /**
+     * Initializes the data store. If the path is not set, <repository
+     * home>/repository/datastore is used. This directory is automatically
+     * created if it does not yet exist. During first initialization, it uploads
+     * all files from local datastore to backend and local datastore acts as a
+     * local cache.
+     */
+    @Override
+    public void init(String homeDir) throws RepositoryException {
+        if (path == null) {
+            path = homeDir + "/repository/datastore";
+        }
+        directory = new File(path);
+        try {
+            mkdirs(directory);
+        } catch (IOException e) {
+            throw new DataStoreException("Could not create directory "
+                    + directory.getAbsolutePath(), e);
+        }
+        tmpDir = new File(homeDir, "/repository/s3tmp");
+        try {
+            if (!mkdirs(tmpDir)) {
+                FileUtils.cleanDirectory(tmpDir);
+                LOG.info("tmp = " + tmpDir.getPath() + " cleaned");
+            }
+        } catch (IOException e) {
+            throw new DataStoreException("Could not create directory "
+                    + tmpDir.getAbsolutePath(), e);
+        }
+        LOG.info("cachePurgeTrigFactor = " + cachePurgeTrigFactor
+                + ", cachePurgeResizeFactor = " + cachePurgeResizeFactor);
+        backend = createBackend();
+        backend.init(this, path, config);
+        String markerFileName = getMarkerFile();
+        if (markerFileName != null) {
+            // create marker file in homeDir to avoid deletion in cache cleanup.
+            File markerFile = new File(homeDir, markerFileName);
+            if (!markerFile.exists()) {
+                LOG.info("load files from local cache");
+                loadFilesFromCache();
+                try {
+                    markerFile.createNewFile();
+                } catch (IOException e) {
+                    throw new DataStoreException(
+                            "Could not create marker file "
+                                    + markerFile.getAbsolutePath(), e);
+                }
+            } else {
+                LOG.info("marker file = " + markerFile.getAbsolutePath()
+                        + " exists");
+            }
+        }
+        cache = new LocalCache(path, tmpDir.getAbsolutePath(), cacheSize,
+                cachePurgeTrigFactor, cachePurgeResizeFactor);
+    }
+
+    /**
+     * Creates a new data record in {@link Backend}. The stream is first
+     * consumed and the contents are saved in a temporary file and the SHA-1
+     * message digest of the stream is calculated. If a record with the same
+     * SHA-1 digest (and length) is found then it is returned. Otherwise new
+     * record is created in {@link Backend} and the temporary file is moved in
+     * place to {@link LocalCache}.
+     * 
+     * @param input
+     *            binary stream
+     * @return {@link CachingDataRecord}
+     * @throws DataStoreException
+     *             if the record could not be created.
+     */
+    @Override
+    public DataRecord addRecord(InputStream input) throws DataStoreException {
+        File temporary = null;
+        try {
+            temporary = newTemporaryFile();
+            DataIdentifier tempId = new DataIdentifier(temporary.getName());
+            usesIdentifier(tempId);
+            // Copy the stream to the temporary file and calculate the
+            // stream length and the message digest of the stream
+            MessageDigest digest = MessageDigest.getInstance(DIGEST);
+            OutputStream output = new DigestOutputStream(new FileOutputStream(
+                    temporary), digest);
+            try {
+                IOUtils.copyLarge(input, output);
+            } finally {
+                output.close();
+            }
+            DataIdentifier identifier = new DataIdentifier(
+                    encodeHexString(digest.digest()));
+            synchronized (this) {
+                usesIdentifier(identifier);
+                backend.write(identifier, temporary);
+                String fileName = getFileName(identifier);
+                cache.store(fileName, temporary);
+            }
+            // this will also make sure that
+            // tempId is not garbage collected until here
+            inUse.remove(tempId);
+            return new CachingDataRecord(this, identifier);
+        } catch (NoSuchAlgorithmException e) {
+            throw new DataStoreException(DIGEST + " not available", e);
+        } catch (IOException e) {
+            throw new DataStoreException("Could not add record", e);
+        } finally {
+            if (temporary != null) {
+                // try to delete - but it's not a big deal if we can't
+                temporary.delete();
+            }
+        }
+    }
+
+    /**
+     * Get a data record for the given identifier or null if data record doesn't
+     * exist in {@link Backend}
+     * 
+     * @param identifier
+     *            identifier of record.
+     * @return the {@link CachingDataRecord} or null.
+     */
+    @Override
+    public DataRecord getRecordIfStored(DataIdentifier identifier)
+            throws DataStoreException {
+        synchronized (this) {
+            usesIdentifier(identifier);
+            if (!backend.exists(identifier)) {
+                return null;
+            }
+            backend.touch(identifier, minModifiedDate);
+            return new CachingDataRecord(this, identifier);
+        }
+    }
+
+    @Override
+    public void updateModifiedDateOnAccess(long before) {
+        LOG.info("minModifiedDate set to: " + before);
+        minModifiedDate = before;
+    }
+
+    /**
+     * Retrieves all identifiers from {@link Backend}.
+     */
+    @Override
+    public Iterator<DataIdentifier> getAllIdentifiers()
+            throws DataStoreException {
+        return backend.getAllIdentifiers();
+    }
+
+    /**
+     * This method deletes record from {@link Backend} and then from
+     * {@link LocalCache}
+     */
+    @Override
+    public void deleteRecord(DataIdentifier identifier)
+            throws DataStoreException {
+        String fileName = getFileName(identifier);
+        synchronized (this) {
+            backend.deleteRecord(identifier);
+            cache.delete(fileName);
+        }
+    }
+
+    @Override
+    public synchronized int deleteAllOlderThan(long min)
+            throws DataStoreException {
+        List<DataIdentifier> diList = backend.deleteAllOlderThan(min);
+        // remove entries from local cache
+        for (DataIdentifier identifier : diList) {
+            cache.delete(getFileName(identifier));
+        }
+        return diList.size();
+    }
+
+    /**
+     * Get stream of record from {@link LocalCache}. If record is not available
+     * in {@link LocalCache}, this method fetches record from {@link Backend}
+     * and stores it to {@link LocalCache}. Stream is then returned from cached
+     * record.
+     */
+    InputStream getStream(DataIdentifier identifier) throws DataStoreException {
+        InputStream in = null;
+        try {
+            String fileName = getFileName(identifier);
+            InputStream cached = cache.getIfStored(fileName);
+            if (cached != null) {
+                return cached;
+            }
+            in = backend.read(identifier);
+            return cache.store(fileName, in);
+        } catch (IOException e) {
+            throw new DataStoreException("IO Exception: " + identifier, e);
+        } finally {
+            IOUtils.closeQuietly(in);
+        }
+    }
+
+    /**
+     * Return lastModified of record from {@link Backend} assuming
+     * {@link Backend} as a single source of truth.
+     */
+    public long getLastModified(DataIdentifier identifier) throws DataStoreException {
+        LOG.info("accessed lastModified");
+        return backend.getLastModified(identifier);
+    }
+
+    /**
+     * Return the length of record from {@link LocalCache} if available,
+     * otherwise retrieve it from {@link Backend}.
+     */
+    public long getLength(DataIdentifier identifier) throws DataStoreException {
+        String fileName = getFileName(identifier);
+        Long length = cache.getFileLength(fileName);
+        if (length != null) {
+            return length.longValue();
+        }
+        return backend.getLength(identifier);
+    }
+
+    @Override
+    protected byte[] getOrCreateReferenceKey() throws DataStoreException {
+        try {
+            return secret.getBytes("UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            throw new DataStoreException(e);
+        }
+    }
+
+    /**
+     * Returns a unique temporary file to be used for creating a new data
+     * record.
+     */
+    private File newTemporaryFile() throws IOException {
+        return File.createTempFile(TMP, null, tmpDir);
+    }
+
+    /**
+     * Load files from {@link LocalCache} to {@link Backend}.
+     */
+    private void loadFilesFromCache() throws RepositoryException {
+        ArrayList<File> files = new ArrayList<File>();
+        listRecursive(files, directory);
+        long totalSize = 0;
+        for (File f : files) {
+            totalSize += f.length();
+        }
+        long currentSize = 0;
+        long time = System.currentTimeMillis();
+        for (File f : files) {
+            long now = System.currentTimeMillis();
+            if (now > time + 5000) {
+                LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
+                time = now;
+            }
+            currentSize += f.length();
+            String name = f.getName();
+            LOG.debug("upload file = " + name);
+            if (!name.startsWith(TMP) && !name.endsWith(DS_STORE)
+                    && f.length() > 0) {
+                loadFileToBackEnd(f);
+            }
+        }
+        LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
+    }
+
+    /**
+     * Traverse recursively and populate list with files.
+     */
+    private void listRecursive(List<File> list, File file) {
+        File[] files = file.listFiles();
+        if (files != null) {
+            for (File f : files) {
+                if (f.isDirectory()) {
+                    listRecursive(list, f);
+                } else {
+                    list.add(f);
+                }
+            }
+        }
+    }
+
+    /**
+     * Upload file from {@link LocalCache} to {@link Backend}.
+     * 
+     * @param f
+     *            file to uploaded.
+     * @throws DataStoreException
+     */
+    private void loadFileToBackEnd(File f) throws DataStoreException {
+        DataIdentifier identifier = new DataIdentifier(f.getName());
+        usesIdentifier(identifier);
+        backend.write(identifier, f);
+        LOG.debug(f.getName() + "uploaded.");
+
+    }
+
+    /**
+     * Derive file name from identifier.
+     */
+    private static String getFileName(DataIdentifier identifier) {
+        String name = identifier.toString();
+        name = name.substring(0, 2) + "/" + name.substring(2, 4) + "/"
+                + name.substring(4, 6) + "/" + name;
+        return name;
+    }
+
+    private void usesIdentifier(DataIdentifier identifier) {
+        inUse.put(identifier, new WeakReference<DataIdentifier>(identifier));
+    }
+
+    private static boolean mkdirs(File dir) throws IOException {
+        if (dir.exists()) {
+            if (dir.isFile()) {
+                throw new IOException("Can not create a directory "
+                        + "because a file exists with the same name: "
+                        + dir.getAbsolutePath());
+            }
+            return false;
+        }
+        boolean created = dir.mkdirs();
+        if (!created) {
+            throw new IOException("Could not create directory: "
+                    + dir.getAbsolutePath());
+        }
+        return created;
+    }
+
+    @Override
+    public void clearInUse() {
+        inUse.clear();
+    }
+
+    public boolean isInUse(DataIdentifier identifier) {
+        return inUse.containsKey(identifier);
+    }
+
+    @Override
+    public void close() throws DataStoreException {
+        cache.close();
+        backend.close();
+        cache = null;
+    }
+
+    /**
+     * Setter for configuration based secret
+     * 
+     * @param secret
+     *            the secret used to sign reference binaries
+     */
+    public void setSecret(String secret) {
+        this.secret = secret;
+    }
+
+    /**
+     * Set the minimum object length.
+     * 
+     * @param minRecordLength
+     *            the length
+     */
+    public void setMinRecordLength(int minRecordLength) {
+        this.minRecordLength = minRecordLength;
+    }
+
+    /**
+     * Return minimum object length.
+     */
+    @Override
+    public int getMinRecordLength() {
+        return minRecordLength;
+    }
+
+    /**
+     * Return path of configuration properties.
+     * 
+     * @return path of configuration properties.
+     */
+    public String getConfig() {
+        return config;
+    }
+
+    /**
+     * Set the configuration properties path.
+     * 
+     * @param config
+     *            path of configuration properties.
+     */
+    public void setConfig(String config) {
+        this.config = config;
+    }
+
+    /**
+     * @return size of {@link LocalCache}.
+     */
+    public long getCacheSize() {
+        return cacheSize;
+    }
+
+    /**
+     * Set size of {@link LocalCache}.
+     * 
+     * @param cacheSize
+     *            size of {@link LocalCache}.
+     */
+    public void setCacheSize(long cacheSize) {
+        this.cacheSize = cacheSize;
+    }
+
+    /**
+     * 
+     * @return path of {@link LocalCache}.
+     */
+    public String getPath() {
+        return path;
+    }
+
+    /**
+     * Set path of {@link LocalCache}.
+     * 
+     * @param path
+     *            of {@link LocalCache}.
+     */
+    public void setPath(String path) {
+        this.path = path;
+    }
+
+    /**
+     * @return Purge trigger factor of {@link LocalCache}.
+     */
+    public double getCachePurgeTrigFactor() {
+        return cachePurgeTrigFactor;
+    }
+
+    /**
+     * Set purge trigger factor of {@link LocalCache}.
+     * 
+     * @param cachePurgeTrigFactor
+     *            purge trigger factor.
+     */
+    public void setCachePurgeTrigFactor(double cachePurgeTrigFactor) {
+        this.cachePurgeTrigFactor = cachePurgeTrigFactor;
+    }
+
+    /**
+     * @return Purge resize factor of {@link LocalCache}.
+     */
+    public double getCachePurgeResizeFactor() {
+        return cachePurgeResizeFactor;
+    }
+
+    /**
+     * Set purge resize factor of {@link LocalCache}.
+     * 
+     * @param cachePurgeResizeFactor
+     *            purge resize factor.
+     */
+    public void setCachePurgeResizeFactor(double cachePurgeResizeFactor) {
+        this.cachePurgeResizeFactor = cachePurgeResizeFactor;
+    }
+
+}
Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java
===================================================================
--- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java	(revision 0)
+++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataIdentifier.java	(working copy)
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.data;
+
+import java.io.Serializable;
+
+/**
+ * Opaque data identifier used to identify records in a data store.
+ * All identifiers must be serializable and implement the standard
+ * object equality and hash code methods.
+ */
+public class DataIdentifier implements Serializable {
+
+    /**
+     * Serial version UID.
+     */
+    private static final long serialVersionUID = -9197191401131100016L;
+
+    /**
+     * Data identifier.
+     */
+    private final String identifier;
+
+    /**
+     * Creates a data identifier from the given string.
+     *
+     * @param identifier data identifier
+     */
+    public DataIdentifier(String identifier) {
+        this.identifier  = identifier;
+    }
+
+    //-------------------------------------------------------------< Object >
+
+    /**
+     * Returns the identifier string.
+     *
+     * @return identifier string
+     */
+    public String toString() {
+        return identifier;
+    }
+
+    /**
+     * Checks if the given object is a data identifier and has the same
+     * string representation as this one.
+     *
+     * @param object other object
+     * @return true if the given object is the same identifier,
+     *         false otherwise
+     */
+    public boolean equals(Object object) {
+        return (object instanceof DataIdentifier)
+            && identifier.equals(object.toString());
+    }
+
+    /**
+     * Returns the hash code of the identifier string.
+     *
+     * @return hash code
+     */
+    public int hashCode() {
+        return identifier.hashCode();
+    }
+
+}
Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java
===================================================================
--- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java	(revision 0)
+++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataRecord.java	(working copy)
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.data;
+
+import java.io.InputStream;
+
+/**
+ * Immutable data record that consists of a binary stream.
+ */
+public interface DataRecord {
+
+    /**
+     * Returns the identifier of this record.
+     *
+     * @return data identifier
+     */
+    DataIdentifier getIdentifier();
+
+    /**
+     * Returns a secure reference to this binary, or {@code null} if no such
+     * reference is available.
+     *
+     * @return binary reference, or {@code null}
+     */
+    String getReference();
+
+    /**
+     * Returns the length of the binary stream in this record.
+     *
+     * @return length of the binary stream
+     * @throws DataStoreException if the record could not be accessed
+     */
+    long getLength() throws DataStoreException;
+
+    /**
+     * Returns the binary stream in this record.
+     *
+     * @return binary stream
+     * @throws DataStoreException if the record could not be accessed
+     */
+    InputStream getStream() throws DataStoreException;
+
+    /**
+     * Returns the last modified of the record.
+     * 
+     * @return last modified time of the binary stream
+     */
+    long getLastModified();
+}
Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStore.java
===================================================================
--- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStore.java	(revision 0)
+++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStore.java	(working copy)
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.core.data;
+
+import java.io.InputStream;
+import java.util.Iterator;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * Append-only store for binary streams. A data store consists of a number
+ * of identifiable data records that each contain a distinct binary stream.
+ * New binary streams can be added to the data store, but existing streams
+ * are never removed or modified.
+ * 

+ * A data store should be fully thread-safe, i.e. it should be possible to + * add and access data records concurrently. Optimally even separate processes + * should be able to concurrently access the data store with zero interprocess + * synchronization. + */ +public interface DataStore { + + /** + * Check if a record for the given identifier exists, and return it if yes. + * If no record exists, this method returns null. + * + * @param identifier data identifier + * @return the record if found, and null if not + * @throws DataStoreException if the data store could not be accessed + */ + DataRecord getRecordIfStored(DataIdentifier identifier) + throws DataStoreException; + + /** + * Returns the identified data record. The given identifier should be + * the identifier of a previously saved data record. Since records are + * never removed, there should never be cases where the identified record + * is not found. Abnormal cases like that are treated as errors and + * handled by throwing an exception. + * + * @param identifier data identifier + * @return identified data record + * @throws DataStoreException if the data store could not be accessed, + * or if the given identifier is invalid + */ + DataRecord getRecord(DataIdentifier identifier) throws DataStoreException; + + /** + * Returns the record that matches the given binary reference. + * Returns {@code null} if the reference is invalid, for example if it + * points to a record that does not exist. + * + * @param reference binary reference + * @return matching record, or {@code null} + * @throws DataStoreException if the data store could not be accessed + */ + DataRecord getRecordFromReference(String reference) + throws DataStoreException; + + /** + * Creates a new data record. The given binary stream is consumed and + * a binary record containing the consumed stream is created and returned. + * If the same stream already exists in another record, then that record + * is returned instead of creating a new one. 
+ *

+ * The given stream is consumed and not closed by this + * method. It is the responsibility of the caller to close the stream. + * A typical call pattern would be: + *

+     *     InputStream stream = ...;
+     *     try {
+     *         record = store.addRecord(stream);
+     *     } finally {
+     *         stream.close();
+     *     }
+     * 
+     *
+     * @param stream binary stream
+     * @return data record that contains the given stream
+     * @throws DataStoreException if the data store could not be accessed
+     */
+    DataRecord addRecord(InputStream stream) throws DataStoreException;
+
+    /**
+     * From now on, update the modified date of an object even when accessing it.
+     * Usually, the modified date is only updated when creating a new object,
+     * or when a new link is added to an existing object. When this setting is enabled,
+     * even getLength() will update the modified date.
+     *
+     * @param before - update the modified date to the current time if it is older than this value
+     */
+    void updateModifiedDateOnAccess(long before);
+
+    /**
+     * Delete objects that have a modified date older than the specified date.
+     *
+     * @param min the minimum time
+     * @return the number of data records deleted
+     * @throws DataStoreException
+     */
+    int deleteAllOlderThan(long min) throws DataStoreException;
+
+    /**
+     * Get all identifiers.
+     *
+     * @return an iterator over all DataIdentifier objects
+     * @throws DataStoreException if the list could not be read
+     */
+    Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException;
+
+    /**
+     * Initializes the data store
+     *
+     * @param homeDir the home directory of the repository
+     * @throws RepositoryException
+     */
+    void init(String homeDir) throws RepositoryException;
+
+    /**
+     * Get the minimum size of an object that should be stored in this data store.
+     * Depending on the overhead and configuration, each store may return a different value.
+     *
+     * @return the minimum size in bytes
+     */
+    int getMinRecordLength();
+
+    /**
+     * Close the data store
+     *
+     * @throws DataStoreException if a problem occurred
+     */
+    void close() throws DataStoreException;
+
+    /**
+     * Clear the in-use list. This is only used for testing to make the garbage collection
+     * think that objects are no longer in use.
+ */ + void clearInUse(); + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreException.java (working copy) @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import javax.jcr.RepositoryException; + +/** + * Exception thrown by the Data Store module. + */ +public class DataStoreException extends RepositoryException { + + /** + * Constructs a new instance of this class with the specified detail + * message. + * + * @param message the detailed message. + */ + public DataStoreException(String message) { + super(message); + } + + /** + * Constructs a new instance of this class with the specified detail + * message and root cause. + * + * @param message the detailed message. 
+ * @param cause root failure cause + */ + public DataStoreException(String message, Throwable cause) { + super(message, cause); + } + + /** + * Constructs a new instance of this class with the specified root cause. + * + * @param rootCause root failure cause + */ + public DataStoreException(Throwable rootCause) { + super(rootCause); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/DataStoreFactory.java (working copy) @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import javax.jcr.RepositoryException; + + +/** + * Factory interface for creating {@link DataStore} instances. Used + * to decouple the repository internals from the repository configuration + * mechanism. + * + * @since Jackrabbit 1.5 + * @see JCR-1438 + */ +public interface DataStoreFactory { + + /** + * Creates, initializes, and returns a {@link DataStore} instance + * for use by the repository. 
Note that no information is passed from + * the client, so all required configuration information must be + * encapsulated in the factory. + * + * @return initialized data store + * @throws RepositoryException if the data store can not be created + */ + DataStore getDataStore() throws RepositoryException; + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataRecord.java (working copy) @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + + +/** + * Data record that is based on a normal file. + */ +public class FileDataRecord extends AbstractDataRecord { + + /** + * The file that contains the binary stream. + */ + private final File file; + + /** + * Creates a data record based on the given identifier and file. 
+ * + * @param identifier data identifier + * @param file file that contains the binary stream + */ + public FileDataRecord( + AbstractDataStore store, DataIdentifier identifier, File file) { + super(store, identifier); + assert file.isFile(); + this.file = file; + } + + /** + * {@inheritDoc} + */ + public long getLength() { + return file.length(); + } + + /** + * {@inheritDoc} + */ + public InputStream getStream() throws DataStoreException { + try { + return new LazyFileInputStream(file); + } catch (IOException e) { + throw new DataStoreException("Error opening input stream of " + file.getAbsolutePath(), e); + } + } + + /** + * {@inheritDoc} + */ + public long getLastModified() { + return file.lastModified(); + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/FileDataStore.java (working copy) @@ -0,0 +1,481 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.data; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.RandomAccessFile; +import java.lang.ref.WeakReference; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.WeakHashMap; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Simple file-based data store. Data records are stored as normal files + * named using a message digest of the contained binary stream. + * + * Configuration: + *
+ * <DataStore class="org.apache.jackrabbit.core.data.FileDataStore">
+ *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ * </DataStore>
+ * 
+ *

+ * If the directory is not set, the directory <repository home>/repository/datastore is used. + *

+ * A three level directory structure is used to avoid placing too many + * files in a single directory. The chosen structure is designed to scale + * up to billions of distinct records. + *

+ * This implementation relies on the underlying file system to support
+ * atomic O(1) move operations with {@link File#renameTo(File)}.
+ */
+public class FileDataStore extends AbstractDataStore
+        implements MultiDataStoreAware {
+
+    /**
+     * Logger instance
+     */
+    private static Logger log = LoggerFactory.getLogger(FileDataStore.class);
+
+    /**
+     * The digest algorithm used to uniquely identify records.
+     */
+    private static final String DIGEST = "SHA-1";
+
+    /**
+     * The default value for the minimum object size.
+     */
+    private static final int DEFAULT_MIN_RECORD_LENGTH = 100;
+
+    /**
+     * The maximum last modified time resolution of the file system.
+     */
+    private static final int ACCESS_TIME_RESOLUTION = 2000;
+
+    /**
+     * Name of the directory used for temporary files.
+     * Must be at least 3 characters.
+     */
+    private static final String TMP = "tmp";
+
+    /**
+     * The minimum modified date. If a file is accessed (read or write) with a modified date
+     * older than this value, the modified date is updated to the current time.
+     */
+    private long minModifiedDate;
+
+    /**
+     * The directory that contains all the data record files. The structure
+     * of content within this directory is controlled by this class.
+     */
+    private File directory;
+
+    /**
+     * The name of the directory that contains all the data record files. The structure
+     * of content within this directory is controlled by this class.
+     */
+    private String path;
+
+    /**
+     * The minimum size of an object that should be stored in this data store.
+     */
+    private int minRecordLength = DEFAULT_MIN_RECORD_LENGTH;
+
+    /**
+     * All data identifiers that are currently in use are in this set until they are garbage collected.
+     */
+    protected Map<DataIdentifier, WeakReference<DataIdentifier>> inUse =
+        Collections.synchronizedMap(new WeakHashMap<DataIdentifier, WeakReference<DataIdentifier>>());
+
+    /**
+     * Initializes the data store.
+     * If the path is not set, <repository home>/repository/datastore is used.
+     * This directory is automatically created if it does not yet exist.
+ * + * @param homeDir + */ + public void init(String homeDir) { + if (path == null) { + path = homeDir + "/repository/datastore"; + } + directory = new File(path); + directory.mkdirs(); + } + + /** + * Get a data record for the given identifier. + * + * @param identifier the identifier + * @return the data record or null + */ + public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { + File file = getFile(identifier); + synchronized (this) { + if (!file.exists()) { + return null; + } + if (minModifiedDate != 0) { + // only check when running garbage collection + if (getLastModified(file) < minModifiedDate) { + setLastModified(file, System.currentTimeMillis() + ACCESS_TIME_RESOLUTION); + } + } + usesIdentifier(identifier); + return new FileDataRecord(this, identifier, file); + } + } + + private void usesIdentifier(DataIdentifier identifier) { + inUse.put(identifier, new WeakReference(identifier)); + } + + /** + * Creates a new data record. + * The stream is first consumed and the contents are saved in a temporary file + * and the SHA-1 message digest of the stream is calculated. If a + * record with the same SHA-1 digest (and length) is found then it is + * returned. Otherwise the temporary file is moved in place to become + * the new data record that gets returned. 
+ * + * @param input binary stream + * @return data record that contains the given stream + * @throws DataStoreException if the record could not be created + */ + public DataRecord addRecord(InputStream input) throws DataStoreException { + File temporary = null; + try { + temporary = newTemporaryFile(); + DataIdentifier tempId = new DataIdentifier(temporary.getName()); + usesIdentifier(tempId); + // Copy the stream to the temporary file and calculate the + // stream length and the message digest of the stream + long length = 0; + MessageDigest digest = MessageDigest.getInstance(DIGEST); + OutputStream output = new DigestOutputStream( + new FileOutputStream(temporary), digest); + try { + length = IOUtils.copyLarge(input, output); + } finally { + output.close(); + } + DataIdentifier identifier = + new DataIdentifier(encodeHexString(digest.digest())); + File file; + + synchronized (this) { + // Check if the same record already exists, or + // move the temporary file in place if needed + usesIdentifier(identifier); + file = getFile(identifier); + if (!file.exists()) { + File parent = file.getParentFile(); + parent.mkdirs(); + if (temporary.renameTo(file)) { + // no longer need to delete the temporary file + temporary = null; + } else { + throw new IOException( + "Can not rename " + temporary.getAbsolutePath() + + " to " + file.getAbsolutePath() + + " (media read only?)"); + } + } else { + long now = System.currentTimeMillis(); + if (getLastModified(file) < now + ACCESS_TIME_RESOLUTION) { + setLastModified(file, now + ACCESS_TIME_RESOLUTION); + } + } + if (file.length() != length) { + // Sanity checks on the record file. These should never fail, + // but better safe than sorry... 
+ if (!file.isFile()) { + throw new IOException("Not a file: " + file); + } + throw new IOException(DIGEST + " collision: " + file); + } + } + // this will also make sure that + // tempId is not garbage collected until here + inUse.remove(tempId); + return new FileDataRecord(this, identifier, file); + } catch (NoSuchAlgorithmException e) { + throw new DataStoreException(DIGEST + " not available", e); + } catch (IOException e) { + throw new DataStoreException("Could not add record", e); + } finally { + if (temporary != null) { + temporary.delete(); + } + } + } + + /** + * Returns the identified file. This method implements the pattern + * used to avoid problems with too many files in a single directory. + *

+ * No sanity checks are performed on the given identifier. + * + * @param identifier data identifier + * @return identified file + */ + private File getFile(DataIdentifier identifier) { + usesIdentifier(identifier); + String string = identifier.toString(); + File file = directory; + file = new File(file, string.substring(0, 2)); + file = new File(file, string.substring(2, 4)); + file = new File(file, string.substring(4, 6)); + return new File(file, string); + } + + /** + * Returns a unique temporary file to be used for creating a new + * data record. + * + * @return temporary file + * @throws IOException + */ + private File newTemporaryFile() throws IOException { + // the directory is already created in the init method + return File.createTempFile(TMP, null, directory); + } + + public void updateModifiedDateOnAccess(long before) { + minModifiedDate = before; + } + + public void deleteRecord(DataIdentifier identifier) + throws DataStoreException { + File file = getFile(identifier); + synchronized (this) { + if (file.exists()) { + if (!file.delete()) { + log.warn("Failed to delete file " + file.getAbsolutePath()); + } + } + } + } + + public int deleteAllOlderThan(long min) { + int count = 0; + for (File file : directory.listFiles()) { + if (file.isDirectory()) { // skip top-level files + count += deleteOlderRecursive(file, min); + } + } + return count; + } + + private int deleteOlderRecursive(File file, long min) { + int count = 0; + if (file.isFile() && file.exists() && file.canWrite()) { + synchronized (this) { + long lastModified; + try { + lastModified = getLastModified(file); + } catch (DataStoreException e) { + log.warn("Failed to read modification date; file not deleted", e); + // don't delete the file, since the lastModified date is uncertain + lastModified = min; + } + if (lastModified < min) { + DataIdentifier id = new DataIdentifier(file.getName()); + if (!inUse.containsKey(id)) { + if (log.isInfoEnabled()) { + log.info("Deleting old file " + 
file.getAbsolutePath() + + " modified: " + new Timestamp(lastModified).toString() + + " length: " + file.length()); + } + if (!file.delete()) { + log.warn("Failed to delete old file " + file.getAbsolutePath()); + } + count++; + } + } + } + } else if (file.isDirectory()) { + File[] list = file.listFiles(); + if (list != null) { + for (File f: list) { + count += deleteOlderRecursive(f, min); + } + } + + // JCR-1396: FileDataStore Garbage Collector and empty directories + // Automatic removal of empty directories (but not the root!) + synchronized (this) { + list = file.listFiles(); + if (list != null && list.length == 0) { + file.delete(); + } + } + } + return count; + } + + private void listRecursive(List list, File file) { + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + if (f.isDirectory()) { + listRecursive(list, f); + } else { + list.add(f); + } + } + } + } + + public Iterator getAllIdentifiers() { + ArrayList files = new ArrayList(); + for (File file : directory.listFiles()) { + if (file.isDirectory()) { // skip top-level files + listRecursive(files, file); + } + } + + ArrayList identifiers = new ArrayList(); + for (File f: files) { + String name = f.getName(); + identifiers.add(new DataIdentifier(name)); + } + log.debug("Found " + identifiers.size() + " identifiers."); + return identifiers.iterator(); + } + + public void clearInUse() { + inUse.clear(); + } + + /** + * Get the name of the directory where this data store keeps the files. + * + * @return the full path name + */ + public String getPath() { + return path; + } + + /** + * Set the name of the directory where this data store keeps the files. + * + * @param directoryName the path name + */ + public void setPath(String directoryName) { + this.path = directoryName; + } + + public int getMinRecordLength() { + return minRecordLength; + } + + /** + * Set the minimum object length. 
+ * + * @param minRecordLength the length + */ + public void setMinRecordLength(int minRecordLength) { + this.minRecordLength = minRecordLength; + } + + public void close() { + // nothing to do + } + + //---------------------------------------------------------< protected >-- + + @Override + protected byte[] getOrCreateReferenceKey() throws DataStoreException { + File file = new File(directory, "reference.key"); + try { + if (file.exists()) { + return FileUtils.readFileToByteArray(file); + } else { + byte[] key = super.getOrCreateReferenceKey(); + FileUtils.writeByteArrayToFile(file, key); + return key; + } + } catch (IOException e) { + throw new DataStoreException( + "Unable to access reference key file " + file.getPath(), e); + } + } + + //-----------------------------------------------------------< private >-- + + /** + * Get the last modified date of a file. + * + * @param file the file + * @return the last modified date + * @throws DataStoreException if reading fails + */ + private static long getLastModified(File file) throws DataStoreException { + long lastModified = file.lastModified(); + if (lastModified == 0) { + throw new DataStoreException("Failed to read record modified date: " + file.getAbsolutePath()); + } + return lastModified; + } + + /** + * Set the last modified date of a file, if the file is writable. 
+ * + * @param file the file + * @param time the new last modified date + * @throws DataStoreException if the file is writable but modifying the date fails + */ + private static void setLastModified(File file, long time) throws DataStoreException { + if (!file.setLastModified(time)) { + if (!file.canWrite()) { + // if we can't write to the file, so garbage collection will also not delete it + // (read only files or file systems) + return; + } + try { + // workaround for Windows: if the file is already open for reading + // (in this or another process), then setting the last modified date + // doesn't work - see also JCR-2872 + RandomAccessFile r = new RandomAccessFile(file, "rw"); + try { + r.setLength(r.length()); + } finally { + r.close(); + } + } catch (IOException e) { + throw new DataStoreException("An IO Exception occurred while trying to set the last modified date: " + file.getAbsolutePath(), e); + } + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LazyFileInputStream.java (working copy) @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import java.io.File; +import java.io.FileDescriptor; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + +import org.apache.commons.io.input.AutoCloseInputStream; + +/** + * This input stream delays opening the file until the first byte is read, and + * closes and discards the underlying stream as soon as the end of input has + * been reached or when the stream is explicitly closed. + */ +public class LazyFileInputStream extends AutoCloseInputStream { + + /** + * The file descriptor to use. + */ + protected final FileDescriptor fd; + + /** + * The file to read from. + */ + protected final File file; + + /** + * True if the input stream was opened. It is also set to true if the stream + * was closed without reading (to avoid opening the file after the stream + * was closed). + */ + protected boolean opened; + + /** + * Creates a new LazyFileInputStream for the given file. If the + * file is unreadable, a FileNotFoundException is thrown. + * The file is not opened until the first byte is read from the stream. + * + * @param file the file + * @throws java.io.FileNotFoundException + */ + public LazyFileInputStream(File file) + throws FileNotFoundException { + super(null); + if (!file.canRead()) { + throw new FileNotFoundException(file.getPath()); + } + this.file = file; + this.fd = null; + } + + /** + * Creates a new LazyFileInputStream for the given file + * descriptor. + * The file is not opened until the first byte is read from the stream. 
+ * + * @param fd + */ + public LazyFileInputStream(FileDescriptor fd) { + super(null); + this.file = null; + this.fd = fd; + } + + /** + * Creates a new LazyFileInputStream for the given file. If the + * file is unreadable, a FileNotFoundException is thrown. + * + * @param name + * @throws java.io.FileNotFoundException + */ + public LazyFileInputStream(String name) throws FileNotFoundException { + this(new File(name)); + } + + /** + * Open the stream if required. + * + * @throws java.io.IOException + */ + protected void open() throws IOException { + if (!opened) { + opened = true; + if (fd != null) { + in = new FileInputStream(fd); + } else { + in = new FileInputStream(file); + } + } + } + + public int read() throws IOException { + open(); + return super.read(); + } + + public int available() throws IOException { + open(); + return super.available(); + } + + public void close() throws IOException { + // make sure the file is not opened afterwards + opened = true; + + // only close the file if it was in fact opened + if (in != null) { + super.close(); + } + } + + public synchronized void reset() throws IOException { + open(); + super.reset(); + } + + public boolean markSupported() { + try { + open(); + } catch (IOException e) { + throw new IllegalStateException(e.toString()); + } + return super.markSupported(); + } + + public synchronized void mark(int readlimit) { + try { + open(); + } catch (IOException e) { + throw new IllegalStateException(e.toString()); + } + super.mark(readlimit); + } + + public long skip(long n) throws IOException { + open(); + return super.skip(n); + } + + public int read(byte[] b) throws IOException { + open(); + return super.read(b, 0, b.length); + } + + public int read(byte[] b, int off, int len) throws IOException { + open(); + return super.read(b, off, len); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LocalCache.java =================================================================== --- 
jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LocalCache.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/LocalCache.java (working copy) @@ -0,0 +1,535 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.jackrabbit.core.data; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.core.data.LazyFileInputStream; +import org.apache.jackrabbit.util.TransientFileFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class implements a LRU cache used by {@link CachingDataStore}. If cache + * size exceeds limit, this cache goes in purge mode. In purge mode any + * operation to cache is no-op. 
After purge cache size would be less than + * cachePurgeResizeFactor * maximum size. + */ +public class LocalCache { + + /** + * Logger instance. + */ + static final Logger LOG = LoggerFactory.getLogger(LocalCache.class); + + /** + * The file names of the files that need to be deleted. + */ + final Set toBeDeleted = new HashSet(); + + /** + * The filename Vs file size LRU cache. + */ + LRUCache cache; + + /** + * The directory where the files are created. + */ + private final File directory; + + /** + * The directory where tmp files are created. + */ + private final File tmp; + + /** + * The maximum size of cache in bytes. + */ + private long maxSize; + + /** + * If true cache is in purgeMode and not available. All operation would be + * no-op. + */ + private volatile boolean purgeMode; + + /** + * Build LRU cache of files located at 'path'. It uses lastModified property + * of file to build LRU cache. If cache size exceeds limit size, this cache + * goes in purge mode. In purge mode any operation to cache is no-op. + * + * @param path file system path + * @param tmpPath temporary directory used by cache. + * @param maxSize maximum size of cache. + * @param cachePurgeTrigFactor factor which triggers cache to purge mode. + * That is if current size exceed (cachePurgeTrigFactor * maxSize), the + * cache will go in auto-purge mode. + * @param cachePurgeResizeFactor after cache purge size of cache will be + * just less (cachePurgeResizeFactor * maxSize). 
+ * @throws RepositoryException + */ + public LocalCache(final String path, final String tmpPath, + final long maxSize, final double cachePurgeTrigFactor, + final double cachePurgeResizeFactor) throws RepositoryException { + this.maxSize = maxSize; + directory = new File(path); + tmp = new File(tmpPath); + cache = new LRUCache(maxSize, cachePurgeTrigFactor, + cachePurgeResizeFactor); + ArrayList allFiles = new ArrayList(); + + Iterator it = FileUtils.iterateFiles(directory, null, true); + while (it.hasNext()) { + File f = it.next(); + allFiles.add(f); + } + Collections.sort(allFiles, new Comparator() { + @Override + public int compare(final File o1, final File o2) { + long l1 = o1.lastModified(), l2 = o2.lastModified(); + return l1 < l2 ? -1 : l1 > l2 ? 1 : 0; + } + }); + String dataStorePath = directory.getAbsolutePath(); + long time = System.currentTimeMillis(); + int count = 0; + int deletecount = 0; + for (File f : allFiles) { + if (f.exists()) { + long length = f.length(); + String name = f.getPath(); + if (name.startsWith(dataStorePath)) { + name = name.substring(dataStorePath.length()); + } + // convert to java path format + name = name.replace("\\", "/"); + if (name.startsWith("/") || name.startsWith("\\")) { + name = name.substring(1); + } + if ((cache.currentSizeInBytes + length) < cache.maxSizeInBytes) { + count++; + cache.put(name, length); + } else { + if (tryDelete(name)) { + deletecount++; + } + } + long now = System.currentTimeMillis(); + if (now > time + 5000) { + LOG.info("Processed {" + (count + deletecount) + "}/{" + + allFiles.size() + "}"); + time = now; + } + } + } + LOG.info("Cached {" + count + "}/{" + allFiles.size() + + "} , currentSizeInBytes = " + cache.currentSizeInBytes); + LOG.info("Deleted {" + deletecount + "}/{" + allFiles.size() + + "} files ."); + } + + /** + * Store an item in the cache and return the input stream. 
If cache is in + * purgeMode or file doesn't exists, inputstream from a + * {@link TransientFileFactory#createTransientFile(String, String, File)} is + * returned. Otherwise inputStream from cached file is returned. This method + * doesn't close the incoming inputstream. + * + * @param fileName the key of cache. + * @param in the inputstream. + * @return the (new) input stream. + */ + public synchronized InputStream store(String fileName, final InputStream in) + throws IOException { + fileName = fileName.replace("\\", "/"); + File f = getFile(fileName); + long length = 0; + if (!f.exists() || isInPurgeMode()) { + OutputStream out = null; + File transFile = null; + try { + TransientFileFactory tff = TransientFileFactory.getInstance(); + transFile = tff.createTransientFile("s3-", "tmp", tmp); + out = new BufferedOutputStream(new FileOutputStream(transFile)); + length = IOUtils.copyLarge(in, out); + } finally { + IOUtils.closeQuietly(out); + } + // rename the file to local fs cache + if (canAdmitFile(length) + && (f.getParentFile().exists() || f.getParentFile().mkdirs()) + && transFile.renameTo(f) && f.exists()) { + if (transFile.exists() && transFile.delete()) { + LOG.warn("tmp file = " + transFile.getAbsolutePath() + + " not deleted successfully"); + } + transFile = null; + toBeDeleted.remove(fileName); + if (cache.get(fileName) == null) { + cache.put(fileName, f.length()); + } + } else { + f = transFile; + } + } else { + // f.exists and not in purge mode + f.setLastModified(System.currentTimeMillis()); + toBeDeleted.remove(fileName); + if (cache.get(fileName) == null) { + cache.put(fileName, f.length()); + } + } + cache.tryPurge(); + return new LazyFileInputStream(f); + } + + /** + * Store an item along with file in cache. Cache size is increased by + * {@link File#length()} If file already exists in cache, + * {@link File#setLastModified(long)} is updated with current time. + * + * @param fileName the key of cache. + * @param src file to be added to cache. 
+ * @throws IOException + */ + public synchronized void store(String fileName, final File src) + throws IOException { + fileName = fileName.replace("\\", "/"); + File dest = getFile(fileName); + File parent = dest.getParentFile(); + if (src.exists() && !dest.exists() && !src.equals(dest) + && canAdmitFile(src.length()) + && (parent.exists() || parent.mkdirs()) && (src.renameTo(dest))) { + toBeDeleted.remove(fileName); + if (cache.get(fileName) == null) { + cache.put(fileName, dest.length()); + } + + } else if (dest.exists()) { + dest.setLastModified(System.currentTimeMillis()); + toBeDeleted.remove(fileName); + if (cache.get(fileName) == null) { + cache.put(fileName, dest.length()); + } + } + cache.tryPurge(); + } + + /** + * Return the inputstream from from cache, or null if not in the cache. + * + * @param fileName name of file. + * @return stream or null. + */ + public InputStream getIfStored(String fileName) throws IOException { + + fileName = fileName.replace("\\", "/"); + File f = getFile(fileName); + synchronized (this) { + if (!f.exists() || isInPurgeMode()) { + log("purgeMode true or file doesn't exists: getIfStored returned"); + return null; + } + f.setLastModified(System.currentTimeMillis()); + return new LazyFileInputStream(f); + } + } + + /** + * Delete file from cache. Size of cache is reduced by file length. The + * method is no-op if file doesn't exist in cache. + * + * @param fileName file name that need to be removed from cache. + */ + public synchronized void delete(String fileName) { + if (isInPurgeMode()) { + log("purgeMode true :delete returned"); + return; + } + fileName = fileName.replace("\\", "/"); + cache.remove(fileName); + } + + /** + * Returns length of file if exists in cache else returns null. + * @param fileName name of the file. 
+ */ + public Long getFileLength(String fileName) { + fileName = fileName.replace("\\", "/"); + File f = getFile(fileName); + synchronized (this) { + if (!f.exists() || isInPurgeMode()) { + log("purgeMode true or file doesn't exists: getFileLength returned"); + return null; + } + f.setLastModified(System.currentTimeMillis()); + return f.length(); + } + } + + /** + * Close the cache. Cache maintain set of files which it was not able to + * delete successfully. This method will an attempt to delete all + * unsuccessful delete files. + */ + public void close() { + log("close"); + deleteOldFiles(); + } + + /** + * Check if cache can admit file of given length. + * @param length of the file. + * @return true if yes else return false. + */ + private synchronized boolean canAdmitFile(final long length) { + // order is important here + boolean value = !isInPurgeMode() && cache.canAdmitFile(length); + if (!value) { + log("cannot admit file of length=" + length + + " and currentSizeInBytes=" + cache.currentSizeInBytes); + } + return value; + } + + /** + * Return true if cache is in purge mode else return false. + */ + synchronized boolean isInPurgeMode() { + return purgeMode || maxSize == 0; + } + + /** + * Set purge mode. If set to true all cache operation will be no-op. If set + * to false, all operations to cache are available. + * + * @param purgeMode purge mode + */ + synchronized void setPurgeMode(final boolean purgeMode) { + this.purgeMode = purgeMode; + } + + File getFile(final String fileName) { + return new File(directory, fileName); + } + + private void deleteOldFiles() { + int initialSize = toBeDeleted.size(); + int count = 0; + for (String n : new ArrayList(toBeDeleted)) { + if (tryDelete(n)) { + count++; + } + } + LOG.info("deleted [" + count + "]/[" + initialSize + "] files"); + } + + /** + * This method tries to delete a file. If it is not able to delete file due + * to any reason, it add it toBeDeleted list. 
+ * + * @param fileName name of the file which will be deleted. + * @return true if this method deletes file successfuly else return false. + */ + boolean tryDelete(final String fileName) { + log("cache delete " + fileName); + File f = getFile(fileName); + if (f.exists() && f.delete()) { + log(fileName + " deleted successfully"); + toBeDeleted.remove(fileName); + while (true) { + f = f.getParentFile(); + if (f.equals(directory) || f.list().length > 0) { + break; + } + // delete empty parent folders (except the main directory) + f.delete(); + } + return true; + } else if (f.exists()) { + LOG.info("not able to delete file = " + f.getAbsolutePath()); + toBeDeleted.add(fileName); + return false; + } + return true; + } + + static int maxSizeElements(final long bytes) { + // after a CQ installation, the average item in + // the data store is about 52 KB + int count = (int) (bytes / 65535); + count = Math.max(1024, count); + count = Math.min(64 * 1024, count); + return count; + } + + static void log(final String s) { + LOG.debug(s); + } + + /** + * A LRU based extension {@link LinkedHashMap}. The key is file name and + * value is length of file. + */ + private class LRUCache extends LinkedHashMap { + private static final long serialVersionUID = 1L; + + volatile long currentSizeInBytes; + + final long maxSizeInBytes; + + long cachePurgeResize; + + private long cachePurgeTrigSize; + + public LRUCache(final long maxSizeInBytes, + final double cachePurgeTrigFactor, + final double cachePurgeResizeFactor) { + super(maxSizeElements(maxSizeInBytes), (float) 0.75, true); + this.maxSizeInBytes = maxSizeInBytes; + this.cachePurgeTrigSize = new Double(cachePurgeTrigFactor + * maxSizeInBytes).longValue(); + this.cachePurgeResize = new Double(cachePurgeResizeFactor + * maxSizeInBytes).longValue(); + } + + /** + * Overridden {@link Map#remove(Object)} to delete corresponding file + * from file system. 
+ */ + @Override + public synchronized Long remove(final Object key) { + String fileName = (String) key; + fileName = fileName.replace("\\", "/"); + Long flength = null; + if (tryDelete(fileName)) { + flength = super.remove(key); + if (flength != null) { + log("cache entry { " + fileName + "} with size {" + flength + + "} removed."); + currentSizeInBytes -= flength.longValue(); + } + } else if (!getFile(fileName).exists()) { + // second attempt. remove from cache if file doesn't exists + flength = super.remove(key); + if (flength != null) { + log(" file not exists. cache entry { " + fileName + + "} with size {" + flength + "} removed."); + currentSizeInBytes -= flength.longValue(); + } + } + return flength; + } + + @Override + public synchronized Long put(final String key, final Long value) { + long flength = value.longValue(); + currentSizeInBytes += flength; + return super.put(key.replace("\\", "/"), value); + } + + /** + * This method tries purging of local cache. It checks if local cache + * has exceeded the defined limit then it triggers purge cache job in a + * seperate thread. + */ + synchronized void tryPurge() { + if (currentSizeInBytes > cachePurgeTrigSize && !isInPurgeMode()) { + setPurgeMode(true); + LOG.info("currentSizeInBytes[" + cache.currentSizeInBytes + + "] exceeds (cachePurgeTrigSize)[" + + cache.cachePurgeTrigSize + "]"); + new Thread(new PurgeJob()).start(); + } + } + /** + * This method check if cache can admit file of given length. + * @param length length of file. + * @return true if cache size + length is less than maxSize. + */ + synchronized boolean canAdmitFile(final long length) { + return cache.currentSizeInBytes + length < cache.maxSizeInBytes; + } + } + + /** + * This class performs purging of local cache. It implements + * {@link Runnable} and should be invoked in a separate thread. 
+ */ + private class PurgeJob implements Runnable { + public PurgeJob() { + // TODO Auto-generated constructor stub + } + + /** + * This method purges local cache till its size is less than + * cacheResizefactor * maxSize + */ + @Override + public void run() { + try { + synchronized (cache) { + LOG.info(" cache purge job started"); + // first try to delete toBeDeleted files + int initialSize = cache.size(); + for (String fileName : new ArrayList(toBeDeleted)) { + cache.remove(fileName); + } + Iterator> itr = cache.entrySet().iterator(); + while (itr.hasNext()) { + Map.Entry entry = itr.next(); + if (entry.getKey() != null) { + if (cache.currentSizeInBytes > cache.cachePurgeResize) { + itr.remove(); + + } else { + break; + } + } + + } + LOG.info(" cache purge job completed: cleaned [" + + (initialSize - cache.size()) + + "] files and currentSizeInBytes = [ " + + cache.currentSizeInBytes + "]"); + } + } catch (Exception e) { + LOG.error("error in purge jobs:", e); + } finally { + setPurgeMode(false); + } + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStore.java (working copy) @@ -0,0 +1,722 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Iterator; +import java.util.concurrent.locks.ReentrantLock; + +import javax.jcr.RepositoryException; + +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.core.fs.FileSystem; +import org.apache.jackrabbit.core.fs.FileSystemException; +import org.apache.jackrabbit.core.fs.FileSystemResource; +import org.apache.jackrabbit.core.fs.local.LocalFileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A MultiDataStore can handle two independent DataStores. + *

    + * Attention: You will lose the global single instance mechanism! + *
    

    + * It can be used if you have two storage systems. One for fast access and + * another one like an archive DataStore on a slower storage system. All files will + * be added to the primary DataStore. On read operations first the primary + * dataStore will be used and if no Record is found the archive DataStore will + * be used. The GarbageCollector will only remove files from the archive + * DataStore. + *
    

    + * The internal MoveDataTask will be started automatically and can be + * configured with the following properties. + *
    

+ * The Configuration: + * + *

+ * <DataStore class="org.apache.jackrabbit.core.data.MultiDataStore">
+ *     <param name="{@link #setMaxAge(int) maxAge}" value="60"/>
+ *     <param name="{@link #setMoveDataTaskSleep(int) moveDataTaskSleep}" value="604800"/>
+ *     <param name="{@link #setMoveDataTaskFirstRunHourOfDay(int) moveDataTaskFirstRunHourOfDay}" value="1"/>
+ *     <param name="{@link #setSleepBetweenRecords(long) sleepBetweenRecords}" value="100"/>
+ *     <param name="{@link #setDelayedDelete(boolean) delayedDelete}" value="false"/>
+ *     <param name="{@link #setDelayedDeleteSleep(long) delayedDeleteSleep}" value="86400"/>
+ *     <param name="primary" value="org.apache.jackrabbit.core.data.db.DbDataStore">
+ *        <param .../>
+ *     </param>
+ *     <param name="archive" value="org.apache.jackrabbit.core.data.FileDataStore">
+ *        <param .../>
+ *     </param>
+ * </DataStore>
+ * 
+ * + *
    + *
  • maxAge: defines how many days the content will reside in the + * primary data store. DataRecords that have been added before this time span + * will be moved to the archive data store. (default = 60)
  • + *
  • moveDataTaskSleep: specifies the sleep time of the + * moveDataTaskThread in seconds. (default = 60 * 60 * 24 * 7, which equals 7 + * days)
  • + *
  • moveDataTaskNextRunHourOfDay: specifies the hour at which + * the moveDataTaskThread initiates its first run (default = 1 + * which means 01:00 at night)
  • + *
  • sleepBetweenRecords: specifies the delay in milliseconds + * between scanning data records (default = 100)
  • + *
  • delayedDelete: its possible to delay the delete operation on + * the primary data store. The DataIdentifiers will be written to a temporary + * file. The file will be processed after a defined sleep (see + * delayedDeleteSleep) It's useful if you like to create a snapshot + * of the primary data store backend in the meantime before the data will be + * deleted. (default = false)
  • + *
  • delayedDeleteSleep: specifies the sleep time of the + * delayedDeleteTaskThread in seconds. (default = 60 * 60 * 24, which equals 1 + * day). This means the delayed delete from the primary data store will be + * processed after one day.
  • + *
+ */ +public class MultiDataStore implements DataStore { + + /** + * Logger instance + */ + private static Logger log = LoggerFactory.getLogger(MultiDataStore.class); + + private DataStore primaryDataStore; + private DataStore archiveDataStore; + + /** + * Max Age in days. + */ + private int maxAge = 60; + + /** + * ReentrantLock that is used while the MoveDataTask is running. + */ + private ReentrantLock moveDataTaskLock = new ReentrantLock(); + private boolean moveDataTaskRunning = false; + private Thread moveDataTaskThread; + + /** + * The sleep time in seconds of the MoveDataTask, 7 day default. + */ + private int moveDataTaskSleep = 60 * 60 * 24 * 7; + + /** + * Indicates when the next run of the move task is scheduled. The first run + * is scheduled by default at 01:00 hours. + */ + private Calendar moveDataTaskNextRun = Calendar.getInstance(); + + /** + * Its possible to delay the delete operation on the primary data store + * while move task is running. The delete will be executed after defined + * delayDeleteSleep. + */ + private boolean delayedDelete = false; + + /** + * The sleep time in seconds to delay remove operation on the primary data + * store, 1 day default. + */ + private long delayedDeleteSleep = 60 * 60 * 24; + + /** + * File that holds the data identifiers if delayDelete is enabled. + */ + private FileSystemResource identifiersToDeleteFile = null; + + private Thread deleteDelayedIdentifiersTaskThread; + + /** + * Name of the file which holds the identifiers if deleayed delete is + * enabled + */ + private final String IDENTIFIERS_TO_DELETE_FILE_KEY = "identifiersToDelete"; + + /** + * The delay time in milliseconds between scanning data records, 100 + * default. 
+ */ + private long sleepBetweenRecords = 100; + + { + if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= 1) { + moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); + } + moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, 1); + moveDataTaskNextRun.set(Calendar.MINUTE, 0); + moveDataTaskNextRun.set(Calendar.SECOND, 0); + moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); + } + + /** + * Setter for the primary dataStore + * + * @param dataStore + */ + public void setPrimaryDataStore(DataStore dataStore) { + this.primaryDataStore = dataStore; + } + + /** + * Setter for the archive dataStore + * + * @param dataStore + */ + public void setArchiveDataStore(DataStore dataStore) { + this.archiveDataStore = dataStore; + } + + /** + * Check if a record for the given identifier exists in the primary data + * store. If not found there it will be returned from the archive data + * store. If no record exists, this method returns null. + * + * @param identifier + * data identifier + * @return the record if found, and null if not + */ + public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { + if (moveDataTaskRunning) { + moveDataTaskLock.lock(); + } + try { + DataRecord dataRecord = primaryDataStore.getRecordIfStored(identifier); + if (dataRecord == null) { + dataRecord = archiveDataStore.getRecordIfStored(identifier); + } + return dataRecord; + } finally { + if (moveDataTaskRunning) { + moveDataTaskLock.unlock(); + } + } + } + + /** + * Returns the identified data record from the primary data store. If not + * found there it will be returned from the archive data store. The given + * identifier should be the identifier of a previously saved data record. + * Since records are never removed, there should never be cases where the + * identified record is not found. Abnormal cases like that are treated as + * errors and handled by throwing an exception. 
+ * + * @param identifier + * data identifier + * @return identified data record + * @throws DataStoreException + * if the data store could not be accessed, or if the given + * identifier is invalid + */ + public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException { + if (moveDataTaskRunning) { + moveDataTaskLock.lock(); + } + try { + return primaryDataStore.getRecord(identifier); + } catch (DataStoreException e) { + return archiveDataStore.getRecord(identifier); + } finally { + if (moveDataTaskRunning) { + moveDataTaskLock.unlock(); + } + } + } + + /** + * Creates a new data record in the primary data store. The given binary + * stream is consumed and a binary record containing the consumed stream is + * created and returned. If the same stream already exists in another + * record, then that record is returned instead of creating a new one. + *

+ * The given stream is consumed and not closed by this + * method. It is the responsibility of the caller to close the stream. A + * typical call pattern would be: + * + *

+     *     InputStream stream = ...;
+     *     try {
+     *         record = store.addRecord(stream);
+     *     } finally {
+     *         stream.close();
+     *     }
+     * 
+ * + * @param stream + * binary stream + * @return data record that contains the given stream + * @throws DataStoreException + * if the data store could not be accessed + */ + public DataRecord addRecord(InputStream stream) throws DataStoreException { + return primaryDataStore.addRecord(stream); + } + + /** + * From now on, update the modified date of an object even when accessing it + * in the archive data store. Usually, the modified date is only updated + * when creating a new object, or when a new link is added to an existing + * object. When this setting is enabled, even getLength() will update the + * modified date. + * + * @param before + * - update the modified date to the current time if it is older + * than this value + */ + public void updateModifiedDateOnAccess(long before) { + archiveDataStore.updateModifiedDateOnAccess(before); + } + + /** + * Delete objects that have a modified date older than the specified date + * from the archive data store. + * + * @param min + * the minimum time + * @return the number of data records deleted + * @throws DataStoreException + */ + public int deleteAllOlderThan(long min) throws DataStoreException { + return archiveDataStore.deleteAllOlderThan(min); + } + + /** + * Get all identifiers from the archive data store. 
+ * + * @return an iterator over all DataIdentifier objects + * @throws DataStoreException + * if the list could not be read + */ + public Iterator getAllIdentifiers() throws DataStoreException { + return archiveDataStore.getAllIdentifiers(); + } + + public DataRecord getRecordFromReference(String reference) + throws DataStoreException { + DataRecord record = primaryDataStore.getRecordFromReference(reference); + if (record == null) { + record = archiveDataStore.getRecordFromReference(reference); + } + return record; + } + + /** + * {@inheritDoc} + */ + public void init(String homeDir) throws RepositoryException { + if (delayedDelete) { + // First initialize the identifiersToDeleteFile + LocalFileSystem fileSystem = new LocalFileSystem(); + fileSystem.setRoot(new File(homeDir)); + identifiersToDeleteFile = new FileSystemResource(fileSystem, FileSystem.SEPARATOR + + IDENTIFIERS_TO_DELETE_FILE_KEY); + } + moveDataTaskThread = new Thread(new MoveDataTask(), + "Jackrabbit-MulitDataStore-MoveDataTaskThread"); + moveDataTaskThread.setDaemon(true); + moveDataTaskThread.start(); + log.info("MultiDataStore-MoveDataTask thread started; first run scheduled at " + + moveDataTaskNextRun.getTime()); + if (delayedDelete) { + try { + // Run on startup the DeleteDelayedIdentifiersTask only if the + // file exists and modify date is older than the + // delayedDeleteSleep timeout ... + if (identifiersToDeleteFile != null + && identifiersToDeleteFile.exists() + && (identifiersToDeleteFile.lastModified() + (delayedDeleteSleep * 1000)) < System + .currentTimeMillis()) { + deleteDelayedIdentifiersTaskThread = new Thread( + //Start immediately ... + new DeleteDelayedIdentifiersTask(0L), + "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); + deleteDelayedIdentifiersTaskThread.setDaemon(true); + deleteDelayedIdentifiersTaskThread.start(); + log.info("Old entries in the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File found. 
DeleteDelayedIdentifiersTask-Thread started now."); + } + } catch (FileSystemException e) { + throw new RepositoryException("I/O error while reading from '" + + identifiersToDeleteFile.getPath() + "'", e); + } + } + } + + /** + * Get the minimum size of an object that should be stored in the primary + * data store. + * + * @return the minimum size in bytes + */ + public int getMinRecordLength() { + return primaryDataStore.getMinRecordLength(); + } + + /** + * {@inheritDoc} + */ + public void close() throws DataStoreException { + DataStoreException lastException = null; + // 1. close the primary data store + try { + primaryDataStore.close(); + } catch (DataStoreException e) { + lastException = e; + } + // 2. close the archive data store + try { + archiveDataStore.close(); + } catch (DataStoreException e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + // 3. if moveDataTaskThread is running interrupt it + try { + if (moveDataTaskRunning) { + moveDataTaskThread.interrupt(); + } + } catch (Exception e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + // 4. 
if deleteDelayedIdentifiersTaskThread is running interrupt it + try { + if (deleteDelayedIdentifiersTaskThread != null + && deleteDelayedIdentifiersTaskThread.isAlive()) { + deleteDelayedIdentifiersTaskThread.interrupt(); + } + } catch (Exception e) { + if (lastException != null) { + lastException = new DataStoreException(lastException); + } + } + if (lastException != null) { + throw lastException; + } + } + + /** + * {@inheritDoc} + */ + public void clearInUse() { + archiveDataStore.clearInUse(); + } + + public int getMaxAge() { + return maxAge; + } + + public void setMaxAge(int maxAge) { + this.maxAge = maxAge; + } + + public int getMoveDataTaskSleep() { + return moveDataTaskSleep; + } + + public int getMoveDataTaskFirstRunHourOfDay() { + return moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY); + } + + public void setMoveDataTaskSleep(int sleep) { + this.moveDataTaskSleep = sleep; + } + + public void setMoveDataTaskFirstRunHourOfDay(int hourOfDay) { + moveDataTaskNextRun = Calendar.getInstance(); + if (moveDataTaskNextRun.get(Calendar.HOUR_OF_DAY) >= hourOfDay) { + moveDataTaskNextRun.add(Calendar.DAY_OF_MONTH, 1); + } + moveDataTaskNextRun.set(Calendar.HOUR_OF_DAY, hourOfDay); + moveDataTaskNextRun.set(Calendar.MINUTE, 0); + moveDataTaskNextRun.set(Calendar.SECOND, 0); + moveDataTaskNextRun.set(Calendar.MILLISECOND, 0); + } + + public void setSleepBetweenRecords(long millis) { + this.sleepBetweenRecords = millis; + } + + public long getSleepBetweenRecords() { + return sleepBetweenRecords; + } + + public boolean isDelayedDelete() { + return delayedDelete; + } + + public void setDelayedDelete(boolean delayedDelete) { + this.delayedDelete = delayedDelete; + } + + public long getDelayedDeleteSleep() { + return delayedDeleteSleep; + } + + public void setDelayedDeleteSleep(long delayedDeleteSleep) { + this.delayedDeleteSleep = delayedDeleteSleep; + } + + /** + * Writes the given DataIdentifier to the delayedDeletedFile. 
+ * + * @param identifier + * @return boolean true if it was successful otherwise false + */ + private boolean writeDelayedDataIdentifier(DataIdentifier identifier) { + BufferedWriter writer = null; + try { + File identifierFile = new File( + ((LocalFileSystem) identifiersToDeleteFile.getFileSystem()).getPath(), + identifiersToDeleteFile.getPath()); + writer = new BufferedWriter(new FileWriter(identifierFile, true)); + writer.write(identifier.toString()); + return true; + } catch (Exception e) { + log.warn("I/O error while saving DataIdentifier (stacktrace on DEBUG log level) to '" + + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); + log.debug("Root cause: ", e); + return false; + } finally { + IOUtils.closeQuietly(writer); + } + } + + /** + * Purges the delayedDeletedFile. + * + * @return boolean true if it was successful otherwise false + */ + private boolean purgeDelayedDeleteFile() { + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new OutputStreamWriter( + identifiersToDeleteFile.getOutputStream())); + writer.write(""); + return true; + } catch (Exception e) { + log.warn("I/O error while purging (stacktrace on DEBUG log level) the " + + IDENTIFIERS_TO_DELETE_FILE_KEY + " file '" + + identifiersToDeleteFile.getPath() + "': " + e.getMessage()); + log.debug("Root cause: ", e); + return false; + } finally { + IOUtils.closeQuietly(writer); + } + } + + /** + * Class for maintaining the MultiDataStore. It will be used to move the + * content of the primary data store to the archive data store. 
+ */ + public class MoveDataTask implements Runnable { + + /** + * {@inheritDoc} + */ + public void run() { + while (!Thread.currentThread().isInterrupted()) { + try { + log.info("Next move-data task run scheduled at " + + moveDataTaskNextRun.getTime()); + long sleepTime = moveDataTaskNextRun.getTimeInMillis() + - System.currentTimeMillis(); + if (sleepTime > 0) { + Thread.sleep(sleepTime); + } + moveDataTaskRunning = true; + moveOutdatedData(); + moveDataTaskRunning = false; + moveDataTaskNextRun.add(Calendar.SECOND, moveDataTaskSleep); + if (delayedDelete) { + if (deleteDelayedIdentifiersTaskThread != null + && deleteDelayedIdentifiersTaskThread.isAlive()) { + log.warn("The DeleteDelayedIdentifiersTask-Thread is already running."); + } else { + deleteDelayedIdentifiersTaskThread = new Thread( + new DeleteDelayedIdentifiersTask(delayedDeleteSleep), + "Jackrabbit-MultiDataStore-DeleteDelayedIdentifiersTaskThread"); + deleteDelayedIdentifiersTaskThread.setDaemon(true); + deleteDelayedIdentifiersTaskThread.start(); + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + log.warn("Interrupted: stopping move-data task."); + } + + /** + * Moves outdated data from primary to archive data store + */ + protected void moveOutdatedData() { + try { + long now = System.currentTimeMillis(); + long maxAgeMilli = 1000L * 60 * 60 * 24 * maxAge; + log.debug("Collecting all Identifiers from PrimaryDataStore..."); + Iterator allIdentifiers = primaryDataStore.getAllIdentifiers(); + int moved = 0; + while (allIdentifiers.hasNext()) { + DataIdentifier identifier = allIdentifiers.next(); + DataRecord dataRecord = primaryDataStore.getRecord(identifier); + if ((dataRecord.getLastModified() + maxAgeMilli) < now) { + try { + moveDataTaskLock.lock(); + if (delayedDelete) { + // first write it to the file and then add it to + // the archive data store ... 
+ if (writeDelayedDataIdentifier(identifier)) { + archiveDataStore.addRecord(dataRecord.getStream()); + moved++; + } + } else { + // first add it and then delete it .. not really + // atomic ... + archiveDataStore.addRecord(dataRecord.getStream()); + ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); + moved++; + } + if (moved % 100 == 0) { + log.debug("Moving DataRecord's... ({})", moved); + } + } catch (DataStoreException e) { + log.error("Failed to move DataRecord. DataIdentifier: " + identifier, e); + } finally { + moveDataTaskLock.unlock(); + } + } + // Give other threads time to use the MultiDataStore while + // MoveDataTask is running.. + Thread.sleep(sleepBetweenRecords); + } + if (delayedDelete) { + log.info("Moved " + + moved + + " DataRecords to the archive data store. The DataRecords in the primary data store will be removed in " + + delayedDeleteSleep + " seconds."); + } else { + log.info("Moved " + moved + " DataRecords to the archive data store."); + } + } catch (Exception e) { + log.warn("Failed to run move-data task.", e); + } + } + } + + /** + * Class to clean up the delayed DataRecords from the primary data store. + */ + public class DeleteDelayedIdentifiersTask implements Runnable { + + boolean run = true; + private long sleepTime = 0L; + + /** + * Constructor + * @param sleep how long this DeleteDelayedIdentifiersTask should sleep in seconds. 
+ */ + public DeleteDelayedIdentifiersTask(long sleep) { + this.sleepTime = (sleep * 1000L); + } + + @Override + public void run() { + if (moveDataTaskRunning) { + log.warn("It's not supported to run the DeleteDelayedIdentifiersTask while the MoveDataTask is running."); + return; + } + while (run && !Thread.currentThread().isInterrupted()) { + if (sleepTime > 0) { + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + log.info("Start to delete DataRecords from the primary data store."); + BufferedReader reader = null; + ArrayList problemIdentifiers = new ArrayList(); + try { + int deleted = 0; + reader = new BufferedReader(new InputStreamReader( + identifiersToDeleteFile.getInputStream())); + while (true) { + String s = reader.readLine(); + if (s == null || s.equals("")) { + break; + } + DataIdentifier identifier = new DataIdentifier(s); + try { + moveDataTaskLock.lock(); + ((MultiDataStoreAware) primaryDataStore).deleteRecord(identifier); + deleted++; + } catch (DataStoreException e) { + log.error("Failed to delete DataRecord. DataIdentifier: " + identifier, + e); + problemIdentifiers.add(identifier); + } finally { + moveDataTaskLock.unlock(); + } + // Give other threads time to use the MultiDataStore + // while + // DeleteDelayedIdentifiersTask is running.. 
+ Thread.sleep(sleepBetweenRecords); + } + log.info("Deleted " + deleted + " DataRecords from the primary data store."); + if (problemIdentifiers.isEmpty()) { + try { + identifiersToDeleteFile.delete(); + } catch (FileSystemException e) { + log.warn("Unable to delete the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File."); + if (!purgeDelayedDeleteFile()) { + log.error("Unable to purge the " + IDENTIFIERS_TO_DELETE_FILE_KEY + + " File."); + } + } + } else { + if (purgeDelayedDeleteFile()) { + for (int x = 0; x < problemIdentifiers.size(); x++) { + writeDelayedDataIdentifier(problemIdentifiers.get(x)); + } + } + } + } catch (InterruptedException e) { + log.warn("Interrupted: stopping delayed-delete task."); + Thread.currentThread().interrupt(); + } catch (Exception e) { + log.warn("Failed to run delayed-delete task.", e); + } finally { + IOUtils.closeQuietly(reader); + run = false; + } + } + } + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/MultiDataStoreAware.java (working copy) @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +/** + * To use a DataStore within a MultiDataStore it must implement this + * MultiDataStoreAware Interface. It extends a DataStore to delete a single + * DataRecord. + */ +public interface MultiDataStoreAware { + + /** + * Deletes a single DataRecord based on the given identifier. Delete will + * only be used by the {@link MoveDataTask}. + * + * @param identifier + * data identifier + * @throws DataStoreException + * if the data store could not be accessed, or if the given + * identifier is invalid + */ + void deleteRecord(DataIdentifier identifier) throws DataStoreException; + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/ScanEventListener.java (working copy) @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data; + +import org.apache.jackrabbit.api.management.MarkEventListener; + +/** + * The listener interface for receiving garbage collection scan events. + */ +public interface ScanEventListener extends MarkEventListener { + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataRecord.java (working copy) @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.data.db; + +import org.apache.jackrabbit.core.data.AbstractDataRecord; +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataStoreException; + +import java.io.BufferedInputStream; +import java.io.InputStream; + +/** + * Data record that is stored in a database + */ +public class DbDataRecord extends AbstractDataRecord { + + protected final DbDataStore store; + protected final long length; + protected long lastModified; + + /** + * Creates a data record based on the given identifier and length. + * + * @param identifier data identifier + * @param length the length + * @param lastModified + */ + public DbDataRecord(DbDataStore store, DataIdentifier identifier, long length, long lastModified) { + super(store, identifier); + this.store = store; + this.length = length; + this.lastModified = lastModified; + } + + /** + * {@inheritDoc} + */ + public long getLength() throws DataStoreException { + lastModified = store.touch(getIdentifier(), lastModified); + return length; + } + + /** + * {@inheritDoc} + */ + public InputStream getStream() throws DataStoreException { + lastModified = store.touch(getIdentifier(), lastModified); + return new BufferedInputStream(new DbInputStream(store, getIdentifier())); + } + + /** + * {@inheritDoc} + */ + public long getLastModified() { + return lastModified; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbDataStore.java (working copy) @@ -0,0 +1,1000 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data.db; + +import org.apache.commons.io.input.CountingInputStream; +import org.apache.jackrabbit.core.data.AbstractDataStore; +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataRecord; +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.core.data.MultiDataStoreAware; +import org.apache.jackrabbit.core.util.db.CheckSchemaOperation; +import org.apache.jackrabbit.core.util.db.ConnectionFactory; +import org.apache.jackrabbit.core.util.db.ConnectionHelper; +import org.apache.jackrabbit.core.util.db.DatabaseAware; +import org.apache.jackrabbit.core.util.db.DbUtility; +import org.apache.jackrabbit.core.util.db.StreamWrapper; +import org.apache.jackrabbit.util.Text; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.lang.ref.WeakReference; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import 
java.util.UUID; +import java.util.WeakHashMap; + +import javax.jcr.RepositoryException; +import javax.sql.DataSource; + +/** + * A data store implementation that stores the records in a database using JDBC. + * + * Configuration: + *
+ * <DataStore class="org.apache.jackrabbit.core.data.db.DbDataStore">
+ *     <param name="{@link #setUrl(String) url}" value="jdbc:postgresql:test"/>
+ *     <param name="{@link #setUser(String) user}" value="sa"/>
+ *     <param name="{@link #setPassword(String) password}" value="sa"/>
+ *     <param name="{@link #setDatabaseType(String) databaseType}" value="postgresql"/>
+ *     <param name="{@link #setDriver(String) driver}" value="org.postgresql.Driver"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ *     <param name="{@link #setMaxConnections(int) maxConnections}" value="2"/>
+ *     <param name="{@link #setCopyWhenReading(boolean) copyWhenReading}" value="true"/>
+ *     <param name="{@link #setTablePrefix(String) tablePrefix}" value=""/>
+ *     <param name="{@link #setSchemaObjectPrefix(String) schemaObjectPrefix}" value=""/>
+ *     <param name="{@link #setSchemaCheckEnabled(String) schemaCheckEnabled}" value="true"/>
+ * </DataStore>
+ * 
+ *

+ * Only URL, user name and password usually need to be set. + * The remaining settings are generated using the database URL sub-protocol from the + * database type resource file. + *

+ * JNDI can be used to get the connection. In this case, use the javax.naming.InitialContext as the driver, + * and the JNDI name as the URL. If the user and password are configured in the JNDI resource, + * they should not be configured here. Example JNDI settings: + *

+ * <param name="driver" value="javax.naming.InitialContext" />
+ * <param name="url" value="java:comp/env/jdbc/Test" />
+ * 
+ *

+ * For Microsoft SQL Server 2005, there is a problem reading large BLOBs. You will need to use + * the JDBC driver version 1.2 or newer, and append ;responseBuffering=adaptive to the database URL. + * Don't append ;selectMethod=cursor, otherwise it can still run out of memory. + * Example database URL: jdbc:sqlserver://localhost:4220;DatabaseName=test;responseBuffering=adaptive + *

+ * By default, the data is copied to a temp file when reading, to avoid problems when reading multiple + * blobs at the same time. + *

+ * The tablePrefix can be used to specify a schema and / or catalog name: + * <param name="tablePrefix" value="ds."> + */ +public class DbDataStore extends AbstractDataStore + implements DatabaseAware, MultiDataStoreAware { + + /** + * The default value for the minimum object size. + */ + public static final int DEFAULT_MIN_RECORD_LENGTH = 100; + + /** + * Write to a temporary file to get the length (slow, but always works). + * This is the default setting. + */ + public static final String STORE_TEMP_FILE = "tempFile"; + + /** + * Call PreparedStatement.setBinaryStream(..., -1) + */ + public static final String STORE_SIZE_MINUS_ONE = "-1"; + + /** + * Call PreparedStatement.setBinaryStream(..., Integer.MAX_VALUE) + */ + public static final String STORE_SIZE_MAX = "max"; + + /** + * The digest algorithm used to uniquely identify records. + */ + protected static final String DIGEST = "SHA-1"; + + /** + * The prefix used for temporary objects. + */ + protected static final String TEMP_PREFIX = "TEMP_"; + + /** + * Logger instance + */ + private static Logger log = LoggerFactory.getLogger(DbDataStore.class); + + /** + * The minimum modified date. If a file is accessed (read or write) with a modified date + * older than this value, the modified date is updated to the current time. + */ + protected long minModifiedDate; + + /** + * The database URL used. + */ + protected String url; + + /** + * The database driver. + */ + protected String driver; + + /** + * The user name. + */ + protected String user; + + /** + * The password + */ + protected String password; + + /** + * The database type used. + */ + protected String databaseType; + + /** + * The minimum size of an object that should be stored in this data store. + */ + protected int minRecordLength = DEFAULT_MIN_RECORD_LENGTH; + + /** + * The prefix for the datastore table, empty by default. + */ + protected String tablePrefix = ""; + + /** + * The prefix of the table names. By default it is empty. 
+ */ + protected String schemaObjectPrefix = ""; + + /** + * Whether the schema check must be done during initialization. + */ + private boolean schemaCheckEnabled = true; + + /** + * The logical name of the DataSource to use. + */ + protected String dataSourceName; + + /** + * This is the property 'table' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String tableSQL = "DATASTORE"; + + /** + * This is the property 'createTable' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String createTableSQL = + "CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB)"; + + /** + * This is the property 'insertTemp' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String insertTempSQL = + "INSERT INTO ${tablePrefix}${table} VALUES(?, 0, ?, NULL)"; + + /** + * This is the property 'updateData' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String updateDataSQL = + "UPDATE ${tablePrefix}${table} SET DATA=? WHERE ID=?"; + + /** + * This is the property 'updateLastModified' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String updateLastModifiedSQL = + "UPDATE ${tablePrefix}${table} SET LAST_MODIFIED=? WHERE ID=? AND LAST_MODIFIED<?"; + + /** + * This is the property 'update' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String updateSQL = + "UPDATE ${tablePrefix}${table} SET ID=?, LENGTH=?, LAST_MODIFIED=? WHERE ID=? AND LAST_MODIFIED=?"; + + /** + * This is the property 'delete' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String deleteSQL = + "DELETE FROM ${tablePrefix}${table} WHERE ID=?"; + + /** + * This is the property 'deleteOlder' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String deleteOlderSQL = + "DELETE FROM ${tablePrefix}${table} WHERE LAST_MODIFIED<?"; + + /** + * This is the property 'selectMeta' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String selectMetaSQL = + "SELECT LENGTH, LAST_MODIFIED FROM ${tablePrefix}${table} WHERE ID=?"; + + /** + * This is the property 'selectAll' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String selectAllSQL = + "SELECT ID FROM ${tablePrefix}${table}"; + + /** + * This is the property 'selectData' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String selectDataSQL = + "SELECT ID, DATA FROM ${tablePrefix}${table} WHERE ID=?"; + + /** + * This is the property 'storeStream' + * in the [databaseType].properties file, initialized with the default value. + */ + protected String storeStream = STORE_TEMP_FILE; + + /** + * Copy the stream to a temp file before returning it. + */ + protected boolean copyWhenReading = true; + + /** + * All data identifiers that are currently in use. + */ + protected Map<DataIdentifier, WeakReference<DataIdentifier>> inUse = + Collections.synchronizedMap(new WeakHashMap<DataIdentifier, WeakReference<DataIdentifier>>()); + + /** + * The temporary identifiers that are currently in use. + */ + protected List<String> temporaryInUse = Collections.synchronizedList(new ArrayList<String>()); + + /** + * The {@link ConnectionHelper} set in the {@link #init(String)} method. + * */ + protected ConnectionHelper conHelper; + + /** + * The repositories {@link ConnectionFactory}. 
+ */ + private ConnectionFactory connectionFactory; + + public void setConnectionFactory(ConnectionFactory connnectionFactory) { + this.connectionFactory = connnectionFactory; + } + + public DataRecord addRecord(InputStream stream) throws DataStoreException { + InputStream fileInput = null; + String tempId = null; + ResultSet rs = null; + try { + long tempModified; + while (true) { + try { + tempModified = System.currentTimeMillis(); + String id = UUID.randomUUID().toString(); + tempId = TEMP_PREFIX + id; + temporaryInUse.add(tempId); + // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? + rs = conHelper.query(selectMetaSQL, tempId); + boolean hasNext = rs.next(); + DbUtility.close(rs); + rs = null; + if (hasNext) { + // re-try in the very, very unlikely event that the row already exists + continue; + } + // INSERT INTO DATASTORE VALUES(?, 0, ?, NULL) + conHelper.exec(insertTempSQL, tempId, tempModified); + break; + } catch (Exception e) { + throw convert("Can not insert new record", e); + } finally { + DbUtility.close(rs); + // prevent that rs.close() is called again + rs = null; + } + } + MessageDigest digest = getDigest(); + DigestInputStream dIn = new DigestInputStream(stream, digest); + CountingInputStream in = new CountingInputStream(dIn); + StreamWrapper wrapper; + if (STORE_SIZE_MINUS_ONE.equals(storeStream)) { + wrapper = new StreamWrapper(in, -1); + } else if (STORE_SIZE_MAX.equals(storeStream)) { + wrapper = new StreamWrapper(in, Integer.MAX_VALUE); + } else if (STORE_TEMP_FILE.equals(storeStream)) { + File temp = moveToTempFile(in); + long length = temp.length(); + wrapper = new StreamWrapper(new TempFileInputStream(temp, true), length); + } else { + throw new DataStoreException("Unsupported stream store algorithm: " + storeStream); + } + // UPDATE DATASTORE SET DATA=? WHERE ID=? 
+ conHelper.exec(updateDataSQL, wrapper, tempId); + long length = in.getByteCount(); + DataIdentifier identifier = + new DataIdentifier(encodeHexString(digest.digest())); + usesIdentifier(identifier); + String id = identifier.toString(); + long newModified; + while (true) { + newModified = System.currentTimeMillis(); + if (checkExisting(tempId, length, identifier)) { + touch(identifier, newModified); + conHelper.exec(deleteSQL, tempId); + break; + } + try { + // UPDATE DATASTORE SET ID=?, LENGTH=?, LAST_MODIFIED=? + // WHERE ID=? AND LAST_MODIFIED=? + int count = conHelper.update(updateSQL, + id, length, newModified, tempId, tempModified); + // If update count is 0, the last modified time of the + // temporary row was changed - which means we need to + // re-try using a new last modified date (a later one) + // because we need to ensure the new last modified date + // is _newer_ than the old (otherwise the garbage + // collection could delete rows) + if (count != 0) { + // update was successful + break; + } + } catch (SQLException e) { + // duplicate key (the row already exists) - repeat + // we use exception handling for flow control here, which is bad, + // but the alternative is to use UPDATE ... WHERE ... (SELECT ...) + // which could cause a deadlock in some databases - also, + // duplicate key will only occur if somebody else concurrently + // added the same record (which is very unlikely) + } + // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? 
+ rs = conHelper.query(selectMetaSQL, tempId); + if (!rs.next()) { + // the row was deleted, which is unexpected / not allowed + String msg = + DIGEST + " temporary entry deleted: " + + " id=" + tempId + " length=" + length; + log.error(msg); + throw new DataStoreException(msg); + } + tempModified = rs.getLong(2); + DbUtility.close(rs); + rs = null; + } + usesIdentifier(identifier); + DbDataRecord record = new DbDataRecord(this, identifier, length, newModified); + return record; + } catch (Exception e) { + throw convert("Can not insert new record", e); + } finally { + if (tempId != null) { + temporaryInUse.remove(tempId); + } + DbUtility.close(rs); + if (fileInput != null) { + try { + fileInput.close(); + } catch (IOException e) { + throw convert("Can not close temporary file", e); + } + } + } + } + + /** + * Check if a row with this ID already exists. + * + * @return true if the row exists and the length matches + * @throw DataStoreException if a row exists, but the length is different + */ + private boolean checkExisting(String tempId, long length, DataIdentifier identifier) throws DataStoreException, SQLException { + String id = identifier.toString(); + // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=? + ResultSet rs = null; + try { + rs = conHelper.query(selectMetaSQL, id); + if (rs.next()) { + long oldLength = rs.getLong(1); + long lastModified = rs.getLong(2); + if (oldLength != length) { + String msg = + DIGEST + " collision: temp=" + tempId + + " id=" + id + " length=" + length + + " oldLength=" + oldLength; + log.error(msg); + throw new DataStoreException(msg); + } + DbUtility.close(rs); + rs = null; + touch(identifier, lastModified); + // row already exists + conHelper.exec(deleteSQL, tempId); + return true; + } + } finally { + DbUtility.close(rs); + } + return false; + } + + /** + * Creates a temp file and copies the data there. + * The input stream is closed afterwards. 
+ * + * @param in the input stream + * @return the file + * @throws IOException + */ + private File moveToTempFile(InputStream in) throws IOException { + File temp = File.createTempFile("dbRecord", null); + TempFileInputStream.writeToFileAndClose(in, temp); + return temp; + } + + public synchronized void deleteRecord(DataIdentifier identifier) throws DataStoreException { + try { + conHelper.exec(deleteSQL, identifier.toString()); + } catch (Exception e) { + throw convert("Can not delete record", e); + } + } + + public synchronized int deleteAllOlderThan(long min) throws DataStoreException { + try { + ArrayList<String> touch = new ArrayList<String>(); + ArrayList<DataIdentifier> ids = new ArrayList<DataIdentifier>(inUse.keySet()); + for (DataIdentifier identifier: ids) { + if (identifier != null) { + touch.add(identifier.toString()); + } + } + touch.addAll(temporaryInUse); + for (String key : touch) { + updateLastModifiedDate(key, 0); + } + // DELETE FROM DATASTORE WHERE LAST_MODIFIED<? + return conHelper.update(deleteOlderSQL, min); + } catch (Exception e) { + throw convert("Can not delete records", e); + } + } + + public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException { + ArrayList<DataIdentifier> list = new ArrayList<DataIdentifier>(); + ResultSet rs = null; + try { + // SELECT ID FROM DATASTORE + rs = conHelper.query(selectAllSQL); + while (rs.next()) { + String id = rs.getString(1); + if (!id.startsWith(TEMP_PREFIX)) { + DataIdentifier identifier = new DataIdentifier(id); + list.add(identifier); + } + } + log.debug("Found " + list.size() + " identifiers."); + return list.iterator(); + } catch (Exception e) { + throw convert("Can not read records", e); + } finally { + DbUtility.close(rs); + } + } + + public int getMinRecordLength() { + return minRecordLength; + } + + /** + * Set the minimum object length. + * The maximum value is around 32000. 
+ * + * @param minRecordLength the length + */ + public void setMinRecordLength(int minRecordLength) { + this.minRecordLength = minRecordLength; + } + + public DataRecord getRecordIfStored(DataIdentifier identifier) throws DataStoreException { + usesIdentifier(identifier); + ResultSet rs = null; + try { + String id = identifier.toString(); + // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID = ? + rs = conHelper.query(selectMetaSQL, id); + if (!rs.next()) { + return null; + } + long length = rs.getLong(1); + long lastModified = rs.getLong(2); + DbUtility.close(rs); + rs = null; + lastModified = touch(identifier, lastModified); + return new DbDataRecord(this, identifier, length, lastModified); + } catch (Exception e) { + throw convert("Can not read identifier " + identifier, e); + } finally { + DbUtility.close(rs); + } + } + + /** + * Open the input stream. This method sets those fields of the caller + * that need to be closed once the input stream is read. + * + * @param inputStream the database input stream object + * @param identifier data identifier + * @throws DataStoreException if the data store could not be accessed, + * or if the given identifier is invalid + */ + InputStream openStream(DbInputStream inputStream, DataIdentifier identifier) throws DataStoreException { + ResultSet rs = null; + try { + // SELECT ID, DATA FROM DATASTORE WHERE ID = ? 
+ rs = conHelper.query(selectDataSQL, identifier.toString()); + if (!rs.next()) { + throw new DataStoreException("Record not found: " + identifier); + } + InputStream stream = rs.getBinaryStream(2); + if (stream == null) { + stream = new ByteArrayInputStream(new byte[0]); + DbUtility.close(rs); + } else if (copyWhenReading) { + // If we copy while reading, create a temp file and close the stream + File temp = moveToTempFile(stream); + stream = new BufferedInputStream(new TempFileInputStream(temp, false)); + DbUtility.close(rs); + } else { + stream = new BufferedInputStream(stream); + inputStream.setResultSet(rs); + } + return stream; + } catch (Exception e) { + DbUtility.close(rs); + throw convert("Retrieving database resource ", e); + } + } + + public synchronized void init(String homeDir) throws DataStoreException { + try { + initDatabaseType(); + + conHelper = createConnectionHelper(getDataSource()); + + if (isSchemaCheckEnabled()) { + createCheckSchemaOperation().run(); + } + } catch (Exception e) { + throw convert("Can not init data store, driver=" + driver + " url=" + url + " user=" + user + + " schemaObjectPrefix=" + schemaObjectPrefix + " tableSQL=" + tableSQL + " createTableSQL=" + createTableSQL, e); + } + } + + private DataSource getDataSource() throws Exception { + if (getDataSourceName() == null || "".equals(getDataSourceName())) { + return connectionFactory.getDataSource(getDriver(), getUrl(), getUser(), getPassword()); + } else { + return connectionFactory.getDataSource(dataSourceName); + } + } + + /** + * This method is called from the {@link #init(String)} method of this class and returns a + * {@link ConnectionHelper} instance which is assigned to the {@code conHelper} field. Subclasses may + * override it to return a specialized connection helper. 
+ * + * @param dataSrc the {@link DataSource} of this persistence manager + * @return a {@link ConnectionHelper} + * @throws Exception on error + */ + protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception { + return new ConnectionHelper(dataSrc, false); + } + + /** + * This method is called from {@link #init(String)} after the + * {@link #createConnectionHelper(DataSource)} method, and returns a default {@link CheckSchemaOperation}. + * + * @return a new {@link CheckSchemaOperation} instance + */ + protected final CheckSchemaOperation createCheckSchemaOperation() { + String tableName = tablePrefix + schemaObjectPrefix + tableSQL; + return new CheckSchemaOperation(conHelper, new ByteArrayInputStream(createTableSQL.getBytes()), tableName); + } + + protected void initDatabaseType() throws DataStoreException { + boolean failIfNotFound = false; + if (databaseType == null) { + if (dataSourceName != null) { + try { + databaseType = connectionFactory.getDataBaseType(dataSourceName); + } catch (RepositoryException e) { + throw new DataStoreException(e); + } + } else { + if (!url.startsWith("jdbc:")) { + return; + } + int start = "jdbc:".length(); + int end = url.indexOf(':', start); + databaseType = url.substring(start, end); + } + } else { + failIfNotFound = true; + } + + InputStream in = + DbDataStore.class.getResourceAsStream(databaseType + ".properties"); + if (in == null) { + if (failIfNotFound) { + String msg = + "Configuration error: The resource '" + databaseType + + ".properties' could not be found;" + + " Please verify the databaseType property"; + log.debug(msg); + throw new DataStoreException(msg); + } else { + return; + } + } + Properties prop = new Properties(); + try { + try { + prop.load(in); + } finally { + in.close(); + } + } catch (IOException e) { + String msg = "Configuration error: Could not read properties '" + databaseType + ".properties'"; + log.debug(msg); + throw new DataStoreException(msg, e); + } + if (driver == 
null) { + driver = getProperty(prop, "driver", driver); + } + tableSQL = getProperty(prop, "table", tableSQL); + createTableSQL = getProperty(prop, "createTable", createTableSQL); + insertTempSQL = getProperty(prop, "insertTemp", insertTempSQL); + updateDataSQL = getProperty(prop, "updateData", updateDataSQL); + updateLastModifiedSQL = getProperty(prop, "updateLastModified", updateLastModifiedSQL); + updateSQL = getProperty(prop, "update", updateSQL); + deleteSQL = getProperty(prop, "delete", deleteSQL); + deleteOlderSQL = getProperty(prop, "deleteOlder", deleteOlderSQL); + selectMetaSQL = getProperty(prop, "selectMeta", selectMetaSQL); + selectAllSQL = getProperty(prop, "selectAll", selectAllSQL); + selectDataSQL = getProperty(prop, "selectData", selectDataSQL); + storeStream = getProperty(prop, "storeStream", storeStream); + if (!STORE_SIZE_MINUS_ONE.equals(storeStream) + && !STORE_TEMP_FILE.equals(storeStream) + && !STORE_SIZE_MAX.equals(storeStream)) { + String msg = "Unsupported Stream store mechanism: " + storeStream + + " supported are: " + STORE_SIZE_MINUS_ONE + ", " + + STORE_TEMP_FILE + ", " + STORE_SIZE_MAX; + log.debug(msg); + throw new DataStoreException(msg); + } + } + + /** + * Get the expanded property value. The following placeholders are supported: + * ${table}: the table name (the default is DATASTORE) and + * ${tablePrefix}: tablePrefix plus schemaObjectPrefix as set in the configuration + * + * @param prop the properties object + * @param key the key + * @param defaultValue the default value + * @return the property value (placeholders are replaced) + */ + protected String getProperty(Properties prop, String key, String defaultValue) { + String sql = prop.getProperty(key, defaultValue); + sql = Text.replace(sql, "${table}", tableSQL).trim(); + sql = Text.replace(sql, "${tablePrefix}", tablePrefix + schemaObjectPrefix).trim(); + return sql; + } + + /** + * Convert an exception to a data store exception. 
+ * + * @param cause the message + * @param e the root cause + * @return the data store exception + */ + protected DataStoreException convert(String cause, Exception e) { + log.warn(cause, e); + if (e instanceof DataStoreException) { + return (DataStoreException) e; + } else { + return new DataStoreException(cause, e); + } + } + + public void updateModifiedDateOnAccess(long before) { + log.debug("Update modifiedDate on access before " + before); + minModifiedDate = before; + } + + /** + * Update the modified date of an entry if required. + * + * @param identifier the entry identifier + * @param lastModified the current last modified date + * @return the new modified date + */ + long touch(DataIdentifier identifier, long lastModified) throws DataStoreException { + usesIdentifier(identifier); + return updateLastModifiedDate(identifier.toString(), lastModified); + } + + private long updateLastModifiedDate(String key, long lastModified) throws DataStoreException { + if (lastModified < minModifiedDate) { + long now = System.currentTimeMillis(); + try { + // UPDATE DATASTORE SET LAST_MODIFIED = ? WHERE ID = ? AND LAST_MODIFIED < ? + conHelper.update(updateLastModifiedSQL, now, key, now); + return now; + } catch (Exception e) { + throw convert("Can not update lastModified", e); + } + } + return lastModified; + } + + /** + * Get the database type (if set). + * @return the database type + */ + public String getDatabaseType() { + return databaseType; + } + + /** + * Set the database type. By default the sub-protocol of the JDBC database URL is used if it is not set. + * It must match the resource file [databaseType].properties. Example: mysql. + * + * @param databaseType + */ + public void setDatabaseType(String databaseType) { + this.databaseType = databaseType; + } + + /** + * Get the database driver + * + * @return the driver + */ + public String getDriver() { + return driver; + } + + /** + * Set the database driver class name. 
+ * If not set, the default driver class name for the database type is used, + * as set in the [databaseType].properties resource; key 'driver'. + * + * @param driver + */ + public void setDriver(String driver) { + this.driver = driver; + } + + /** + * Get the password. + * + * @return the password + */ + public String getPassword() { + return password; + } + + /** + * Set the password. + * + * @param password + */ + public void setPassword(String password) { + this.password = password; + } + + /** + * Get the database URL. + * + * @return the URL + */ + public String getUrl() { + return url; + } + + /** + * Set the database URL. + * Example: jdbc:postgresql:test + * + * @param url + */ + public void setUrl(String url) { + this.url = url; + } + + /** + * Get the user name. + * + * @return the user name + */ + public String getUser() { + return user; + } + + /** + * Set the user name. + * + * @param user + */ + public void setUser(String user) { + this.user = user; + } + + /** + * @return whether the schema check is enabled + */ + public final boolean isSchemaCheckEnabled() { + return schemaCheckEnabled; + } + + /** + * @param enabled set whether the schema check is enabled + */ + public final void setSchemaCheckEnabled(boolean enabled) { + schemaCheckEnabled = enabled; + } + + public synchronized void close() throws DataStoreException { + // nothing to do + } + + protected void usesIdentifier(DataIdentifier identifier) { + inUse.put(identifier, new WeakReference(identifier)); + } + + public void clearInUse() { + inUse.clear(); + } + + protected synchronized MessageDigest getDigest() throws DataStoreException { + try { + return MessageDigest.getInstance(DIGEST); + } catch (NoSuchAlgorithmException e) { + throw convert("No such algorithm: " + DIGEST, e); + } + } + + /** + * Get the maximum number of concurrent connections. + * + * @deprecated + * @return the maximum number of connections. 
+ */ + public int getMaxConnections() { + return -1; + } + + /** + * Set the maximum number of concurrent connections in the pool. + * At least 3 connections are required if the garbage collection process is used. + * + * @deprecated + * @param maxConnections the new value + */ + public void setMaxConnections(int maxConnections) { + // no effect + } + + /** + * Is a stream copied to a temporary file before returning? + * + * @return the setting + */ + public boolean getCopyWhenReading() { + return copyWhenReading; + } + + /** + * Set the copy setting. If enabled, + * a stream is always copied to a temporary file when reading a stream. + * + * @param copyWhenReading the new setting + */ + public void setCopyWhenReading(boolean copyWhenReading) { + this.copyWhenReading = copyWhenReading; + } + + /** + * Get the table prefix. + * + * @return the table prefix. + */ + public String getTablePrefix() { + return tablePrefix; + } + + /** + * Set the new table prefix. The default is empty. + * The table name is constructed like this: + * ${tablePrefix}${schemaObjectPrefix}${tableName} + * + * @param tablePrefix the new value + */ + public void setTablePrefix(String tablePrefix) { + this.tablePrefix = tablePrefix; + } + + /** + * Get the schema prefix. + * + * @return the schema object prefix + */ + public String getSchemaObjectPrefix() { + return schemaObjectPrefix; + } + + /** + * Set the schema object prefix. The default is empty. 
+ * The table name is constructed like this: + * ${tablePrefix}${schemaObjectPrefix}${tableName} + * + * @param schemaObjectPrefix the new prefix + */ + public void setSchemaObjectPrefix(String schemaObjectPrefix) { + this.schemaObjectPrefix = schemaObjectPrefix; + } + + public String getDataSourceName() { + return dataSourceName; + } + + public void setDataSourceName(String dataSourceName) { + this.dataSourceName = dataSourceName; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DbInputStream.java (working copy) @@ -0,0 +1,190 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.data.db; + +import java.io.EOFException; +import java.io.IOException; +import java.sql.ResultSet; + +import org.apache.commons.io.input.AutoCloseInputStream; +import org.apache.jackrabbit.core.data.DataIdentifier; +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.core.util.db.DbUtility; + +/** + * This class represents an input stream backed by a database. The database + * objects are only acquired when reading from the stream, and stay open until + * the stream is closed, fully read, or garbage collected. + *

+ * This class does not support mark/reset. It is always to be wrapped + * using a BufferedInputStream. + */ +public class DbInputStream extends AutoCloseInputStream { + + protected DbDataStore store; + protected DataIdentifier identifier; + protected boolean endOfStream; + + protected ResultSet rs; + + /** + * Create a database input stream for the given identifier. + * Database access is delayed until the first byte is read from the stream. + * + * @param store the database data store + * @param identifier the data identifier + */ + protected DbInputStream(DbDataStore store, DataIdentifier identifier) { + super(null); + this.store = store; + this.identifier = identifier; + } + + /** + * Open the stream if required. + * + * @throws IOException + */ + protected void openStream() throws IOException { + if (endOfStream) { + throw new EOFException(); + } + if (in == null) { + try { + in = store.openStream(this, identifier); + } catch (DataStoreException e) { + IOException e2 = new IOException(e.getMessage()); + e2.initCause(e); + throw e2; + } + } + } + + /** + * {@inheritDoc} + * When the stream is consumed, the database objects held by the instance are closed. + */ + public int read() throws IOException { + if (endOfStream) { + return -1; + } + openStream(); + int c = in.read(); + if (c == -1) { + endOfStream = true; + close(); + } + return c; + } + + /** + * {@inheritDoc} + * When the stream is consumed, the database objects held by the instance are closed. + */ + public int read(byte[] b) throws IOException { + return read(b, 0, b.length); + } + + /** + * {@inheritDoc} + * When the stream is consumed, the database objects held by the instance are closed. 
+ */ + public int read(byte[] b, int off, int len) throws IOException { + if (endOfStream) { + return -1; + } + openStream(); + int c = in.read(b, off, len); + if (c == -1) { + endOfStream = true; + close(); + } + return c; + } + + /** + * {@inheritDoc} + * When the stream is consumed, the database objects held by the instance are closed. + */ + public void close() throws IOException { + if (in != null) { + in.close(); + in = null; + // some additional database objects + // may need to be closed + if (rs != null) { + DbUtility.close(rs); + rs = null; + } + } + } + + /** + * {@inheritDoc} + */ + public long skip(long n) throws IOException { + if (endOfStream) { + return -1; + } + openStream(); + return in.skip(n); + } + + /** + * {@inheritDoc} + */ + public int available() throws IOException { + if (endOfStream) { + return 0; + } + openStream(); + return in.available(); + } + + /** + * This method does nothing. + */ + public void mark(int readlimit) { + // do nothing + } + + /** + * This method does nothing. + */ + public void reset() throws IOException { + // do nothing + } + + /** + * Check whether mark and reset are supported. + * + * @return false + */ + public boolean markSupported() { + return false; + } + + /** + * Set the result set of this input stream. This object must be closed once + * the stream is closed. + * + * @param rs the result set + */ + void setResultSet(ResultSet rs) { + this.rs = rs; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/DerbyDataStore.java (working copy) @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.data.db; + +import java.sql.SQLException; + +import javax.sql.DataSource; + +import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.core.util.db.ConnectionHelper; +import org.apache.jackrabbit.core.util.db.DerbyConnectionHelper; + +/** + * The Derby data store closes the database when the data store is closed + * (embedded databases only). 
+ */ +public class DerbyDataStore extends DbDataStore { + + /** + * {@inheritDoc} + */ + @Override + protected ConnectionHelper createConnectionHelper(DataSource dataSrc) throws Exception { + return new DerbyConnectionHelper(dataSrc, false); + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized void close() throws DataStoreException { + super.close(); + try { + ((DerbyConnectionHelper) conHelper).shutDown(getDriver()); + } catch (SQLException e) { + throw new DataStoreException(e); + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/data/db/TempFileInputStream.java (working copy) @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.data.db; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.io.input.AutoCloseInputStream; + +/** + * An input stream from a temporary file. The file is deleted when the stream is + * closed, fully read, or garbage collected. + *

+ * This class does not support mark/reset. It is always to be wrapped + * using a BufferedInputStream. + */ +public class TempFileInputStream extends AutoCloseInputStream { + + private final File file; + private boolean closed; + private boolean delayedResourceCleanup = true; + + /** + * Copy the data to a file and close the input stream afterwards. + * + * @param in the input stream + * @param file the target file + * @return the size of the file + */ + public static long writeToFileAndClose(InputStream in, File file) throws IOException { + OutputStream out = new FileOutputStream(file); + IOUtils.copy(in, out); + out.close(); + in.close(); + return file.length(); + } + + /** + * Construct a new temporary file input stream. + * The file is deleted if the input stream is closed or fully read and + * delayedResourceCleanup was set to false. Otherwise you must call {@link #deleteFile()}. + * Deleting is only attempted once. + * + * @param file the temporary file + * @param delayedResourceCleanup + */ + public TempFileInputStream(File file, boolean delayedResourceCleanup) throws FileNotFoundException { + super(new BufferedInputStream(new FileInputStream(file))); + this.file = file; + this.delayedResourceCleanup = delayedResourceCleanup; + } + + public File getFile() { + return file; + } + + public void deleteFile() { + file.delete(); + } + + private int closeIfEOF(int read) throws IOException { + if (read < 0) { + close(); + } + return read; + } + + public void close() throws IOException { + if (!closed) { + in.close(); + if (!delayedResourceCleanup) { + deleteFile(); + } + closed = true; + } + } + + public int available() throws IOException { + return in.available(); + } + + /** + * This method does nothing. + */ + public void mark(int readlimit) { + // do nothing + } + + /** + * Check whether mark and reset are supported. 
+ * + * @return false + */ + public boolean markSupported() { + return false; + } + + public long skip(long n) throws IOException { + return in.skip(n); + } + + public void reset() throws IOException { + in.reset(); + } + + public int read(byte[] b, int off, int len) throws IOException { + if (closed) { + return -1; + } + return closeIfEOF(in.read(b, off, len)); + } + + public int read(byte[] b) throws IOException { + if (closed) { + return -1; + } + return closeIfEOF(in.read(b)); + } + + public int read() throws IOException { + if (closed) { + return -1; + } + return closeIfEOF(in.read()); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/BasedFileSystem.java (working copy) @@ -0,0 +1,187 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import java.io.InputStream; +import java.io.OutputStream; + + +/** + * A BasedFileSystem represents a 'file system in a file system'. 
+ */ +public class BasedFileSystem implements FileSystem { + + protected final FileSystem fsBase; + + protected final String basePath; + + /** + * Creates a new BasedFileSystem + * + * @param fsBase the FileSystem the new file system should be based on + * @param relRootPath the root path relative to fsBase's root + */ + public BasedFileSystem(FileSystem fsBase, String relRootPath) { + if (fsBase == null) { + throw new IllegalArgumentException("invalid file system argument"); + } + this.fsBase = fsBase; + + if (relRootPath == null) { + throw new IllegalArgumentException("invalid null path argument"); + } + if (relRootPath.equals(SEPARATOR)) { + throw new IllegalArgumentException("invalid path argument"); + } + if (!relRootPath.startsWith(SEPARATOR)) { + relRootPath = SEPARATOR + relRootPath; + } + if (relRootPath.endsWith(SEPARATOR)) { + relRootPath = relRootPath.substring(0, relRootPath.length() - 1); + + } + this.basePath = relRootPath; + } + + protected String buildBasePath(String path) { + if (path.startsWith(SEPARATOR)) { + if (path.length() == 1) { + return basePath; + } else { + return basePath + path; + } + } else { + return basePath + SEPARATOR + path; + } + } + + //-----------------------------------------------------------< FileSystem > + /** + * {@inheritDoc} + */ + public void init() throws FileSystemException { + // check base path + if (!fsBase.isFolder(basePath)) { + fsBase.createFolder(basePath); + } + } + + /** + * {@inheritDoc} + */ + public void close() throws FileSystemException { + // do nothing; base file system should be closed explicitly + } + + /** + * {@inheritDoc} + */ + public void createFolder(String folderPath) throws FileSystemException { + fsBase.createFolder(buildBasePath(folderPath)); + } + + /** + * {@inheritDoc} + */ + public void deleteFile(String filePath) throws FileSystemException { + fsBase.deleteFile(buildBasePath(filePath)); + } + + /** + * {@inheritDoc} + */ + public void deleteFolder(String folderPath) throws 
FileSystemException { + fsBase.deleteFolder(buildBasePath(folderPath)); + } + + /** + * {@inheritDoc} + */ + public boolean exists(String path) throws FileSystemException { + return fsBase.exists(buildBasePath(path)); + } + + /** + * {@inheritDoc} + */ + public InputStream getInputStream(String filePath) throws FileSystemException { + return fsBase.getInputStream(buildBasePath(filePath)); + } + + /** + * {@inheritDoc} + */ + public OutputStream getOutputStream(String filePath) throws FileSystemException { + return fsBase.getOutputStream(buildBasePath(filePath)); + } + + /** + * {@inheritDoc} + */ + public boolean hasChildren(String path) throws FileSystemException { + return fsBase.hasChildren(buildBasePath(path)); + } + + /** + * {@inheritDoc} + */ + public boolean isFile(String path) throws FileSystemException { + return fsBase.isFile(buildBasePath(path)); + } + + /** + * {@inheritDoc} + */ + public boolean isFolder(String path) throws FileSystemException { + return fsBase.isFolder(buildBasePath(path)); + } + + /** + * {@inheritDoc} + */ + public long lastModified(String path) throws FileSystemException { + return fsBase.lastModified(buildBasePath(path)); + } + + /** + * {@inheritDoc} + */ + public long length(String filePath) throws FileSystemException { + return fsBase.length(buildBasePath(filePath)); + } + + /** + * {@inheritDoc} + */ + public String[] list(String folderPath) throws FileSystemException { + return fsBase.list(buildBasePath(folderPath)); + } + + /** + * {@inheritDoc} + */ + public String[] listFiles(String folderPath) throws FileSystemException { + return fsBase.listFiles(buildBasePath(folderPath)); + } + + /** + * {@inheritDoc} + */ + public String[] listFolders(String folderPath) throws FileSystemException { + return fsBase.listFolders(buildBasePath(folderPath)); + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java =================================================================== --- 
jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystem.java (working copy) @@ -0,0 +1,206 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import java.io.InputStream; +import java.io.OutputStream; + +/** + * The FileSystem interface is an abstraction of a virtual + * file system. The similarities of its method names with the methods + * of the java.io.File class are intentional. + *
+ * Implementations of this interface expose a file system-like resource. + * File system-like resources include WebDAV-enabled servers, local file systems, + * and so forth. + */ +public interface FileSystem { + + /** + * File separator + */ + String SEPARATOR = "/"; + + /** + * File separator character + */ + char SEPARATOR_CHAR = '/'; + + /** + * Initialize the file system + * + * @throws FileSystemException if the file system initialization fails + */ + void init() throws FileSystemException; + + /** + * Close the file system. After calling this method, the file system is no + * longer accessible. + * + * @throws FileSystemException + */ + void close() throws FileSystemException; + + /** + * Returns an input stream of the contents of the file denoted by this path. + * + * @param filePath the path of the file. + * @return an input stream of the contents of the file. + * @throws FileSystemException if the file does not exist + * or if it cannot be read from + */ + InputStream getInputStream(String filePath) throws FileSystemException; + + /** + * Returns an output stream for writing bytes to the file denoted by this path. + * The file will be created if it doesn't exist. If the file exists, its contents + * will be overwritten. + * + * @param filePath the path of the file. + * @return an output stream for writing bytes to the file. + * @throws FileSystemException if the file cannot be written to or created + */ + OutputStream getOutputStream(String filePath) throws FileSystemException; + + /** + * Creates the folder named by this path, including any necessary but + * nonexistent parent folders. Note that if this operation fails it + * may have succeeded in creating some of the necessary parent folders. + * + * @param folderPath the path of the folder to be created. + * @throws FileSystemException if a file system entry denoted by path + * already exists or if another error occurs. 
+ */ + void createFolder(String folderPath) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path exists; false otherwise. + * @throws FileSystemException + */ + boolean exists(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists and + * is a file. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path is a file; false otherwise. + * @throws FileSystemException + */ + boolean isFile(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path exists and + * is a folder. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path is a folder; false otherwise. + * @throws FileSystemException + */ + boolean isFolder(String path) throws FileSystemException; + + /** + * Tests whether the file system entry denoted by this path has child entries. + * + * @param path the path of a file system entry. + * @return true if the file system entry at path has child entries; false otherwise. + * @throws FileSystemException + */ + boolean hasChildren(String path) throws FileSystemException; + + /** + * Returns the length of the file denoted by this path. + * + * @param filePath the path of the file. + * @return The length, in bytes, of the file denoted by this path, + * or -1L if the length can't be determined. + * @throws FileSystemException if the path does not denote an existing file. + */ + long length(String filePath) throws FileSystemException; + + /** + * Returns the time that the file system entry denoted by this path + * was last modified. + * + * @param path the path of a file system entry. 
+ * @return A long value representing the time the file system entry was + * last modified, measured in milliseconds since the epoch + * (00:00:00 GMT, January 1, 1970), or 0L if the modification + * time can't be determined. + * @throws FileSystemException if the file system entry does not exist. + */ + long lastModified(String path) throws FileSystemException; + + /** + * Returns an array of strings naming the files and folders + * in the folder denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the files and folders + * in the folder denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] list(String folderPath) throws FileSystemException; + + /** + * Returns an array of strings naming the files in the folder + * denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the files in the folder + * denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] listFiles(String folderPath) throws FileSystemException; + + /** + * Returns an array of strings naming the folders in the folder + * denoted by this path. + * + * @param folderPath the path of the folder whose contents is to be listed. + * @return an array of strings naming the folders in the folder + * denoted by this path. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + String[] listFolders(String folderPath) throws FileSystemException; + + /** + * Deletes the file denoted by this path. + * + * @param filePath the path of the file to be deleted. + * @throws FileSystemException if this path does not denote a file or if + * another error occurs. 
+ */ + void deleteFile(String filePath) throws FileSystemException; + + /** + * Deletes the folder denoted by this path. Any contents of this folder + * (folders and files) will be deleted recursively. + * + * @param folderPath the path of the folder to be deleted. + * @throws FileSystemException if this path does not denote a folder or if + * another error occurs. + */ + void deleteFolder(String folderPath) throws FileSystemException; + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemException.java (working copy) @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +/** + * The FileSystemException signals an error within a file system + * operation. FileSystemExceptions are thrown by {@link FileSystem} + * implementations. + */ +public class FileSystemException extends Exception { + + /** + * Constructs a new instance of this class with the specified detail + * message. 
+ * + * @param message the detail message. The detail message is saved for + * later retrieval by the {@link #getMessage()} method. + */ + public FileSystemException(String message) { + super(message); + } + + /** + * Constructs a new instance of this class with the specified detail + * message and root cause. + * + * @param message the detail message. The detail message is saved for + * later retrieval by the {@link #getMessage()} method. + * @param rootCause root failure cause + */ + public FileSystemException(String message, Throwable rootCause) { + super(message, rootCause); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemFactory.java (working copy) @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import javax.jcr.RepositoryException; + + +/** + * Factory interface for creating {@link FileSystem} instances. 
Used + * to decouple the repository internals from the repository configuration + * mechanism. + */ +public interface FileSystemFactory { + + /** + * Creates, initializes, and returns a {@link FileSystem} instance + * for use by the repository. Note that no information is passed from + * the client, so all required configuration information must be + * encapsulated in the factory. + * + * @return initialized file system + * @throws RepositoryException if the file system can not be created + */ + FileSystem getFileSystem() throws RepositoryException; + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemPathUtil.java (working copy) @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import java.io.ByteArrayOutputStream; +import java.util.BitSet; + + +/** + * Utility class for handling paths in a file system. 
+ */ +public final class FileSystemPathUtil { + + /** + * Array of lowercase hexadecimal characters used in creating hex escapes. + */ + private static final char[] HEX_TABLE = "0123456789abcdef".toCharArray(); + + /** + * The escape character used to mark hex escape sequences. + */ + private static final char ESCAPE_CHAR = '%'; + + /** + * The list of characters that are not encoded by the escapeName(String) + * and unescape(String) methods. They contains the characters + * which can safely be used in file names: + */ + public static final BitSet SAFE_NAMECHARS; + + /** + * The list of characters that are not encoded by the escapePath(String) + * and unescape(String) methods. They contains the characters + * which can safely be used in file paths: + */ + public static final BitSet SAFE_PATHCHARS; + + static { + // build list of valid name characters + SAFE_NAMECHARS = new BitSet(256); + int i; + for (i = 'a'; i <= 'z'; i++) { + SAFE_NAMECHARS.set(i); + } + for (i = 'A'; i <= 'Z'; i++) { + SAFE_NAMECHARS.set(i); + } + for (i = '0'; i <= '9'; i++) { + SAFE_NAMECHARS.set(i); + } + SAFE_NAMECHARS.set('-'); + SAFE_NAMECHARS.set('_'); + SAFE_NAMECHARS.set('.'); + + // build list of valid path characters (includes name characters) + SAFE_PATHCHARS = (BitSet) SAFE_NAMECHARS.clone(); + SAFE_PATHCHARS.set(FileSystem.SEPARATOR_CHAR); + } + + /** + * private constructor + */ + private FileSystemPathUtil() { + } + + /** + * Escapes the given string using URL encoding for all bytes not included + * in the given set of safe characters. 
+ * + * @param s the string to escape + * @param safeChars set of safe characters (bytes) + * @return escaped string + */ + private static String escape(String s, BitSet safeChars) { + byte[] bytes = s.getBytes(); + StringBuilder out = new StringBuilder(bytes.length); + for (int i = 0; i < bytes.length; i++) { + int c = bytes[i] & 0xff; + if (safeChars.get(c) && c != ESCAPE_CHAR) { + out.append((char) c); + } else { + out.append(ESCAPE_CHAR); + out.append(HEX_TABLE[(c >> 4) & 0x0f]); + out.append(HEX_TABLE[(c) & 0x0f]); + } + } + return out.toString(); + } + + /** + * Encodes the specified path. Same as + * {@link #escapeName(String)} except that the separator + * character / is regarded as a legal path character + * that needs no escaping. + * + * @param path the path to encode. + * @return the escaped path + */ + public static String escapePath(String path) { + return escape(path, SAFE_PATHCHARS); + } + + /** + * Encodes the specified name. Same as + * {@link #escapePath(String)} except that the separator character + * / is regarded as an illegal character that needs + * escaping. + * + * @param name the name to encode. + * @return the escaped name + */ + public static String escapeName(String name) { + return escape(name, SAFE_NAMECHARS); + } + + /** + * Decodes the specified path/name. 
+ * + * @param pathOrName the escaped path/name + * @return the unescaped path/name + */ + public static String unescape(String pathOrName) { + ByteArrayOutputStream out = new ByteArrayOutputStream(pathOrName.length()); + for (int i = 0; i < pathOrName.length(); i++) { + char c = pathOrName.charAt(i); + if (c == ESCAPE_CHAR) { + try { + out.write(Integer.parseInt(pathOrName.substring(i + 1, i + 3), 16)); + } catch (NumberFormatException e) { + IllegalArgumentException iae = new IllegalArgumentException("Failed to unescape escape sequence"); + iae.initCause(e); + throw iae; + } + i += 2; + } else { + out.write(c); + } + } + return new String(out.toByteArray()); + } + + /** + * Tests whether the specified path represents the root path, i.e. "/". + * + * @param path path to test + * @return true if the specified path represents the root path; false otherwise. + */ + public static boolean denotesRoot(String path) { + return path.equals(FileSystem.SEPARATOR); + } + + /** + * Checks if path is a valid path. + * + * @param path the path to be checked + * @throws FileSystemException If path is not a valid path + */ + public static void checkFormat(String path) throws FileSystemException { + if (path == null) { + throw new FileSystemException("null path"); + } + + // path must be absolute, i.e. 
starting with '/' + if (!path.startsWith(FileSystem.SEPARATOR)) { + throw new FileSystemException("not an absolute path: " + path); + } + + // trailing '/' is not allowed (except for root path) + if (path.endsWith(FileSystem.SEPARATOR) && path.length() > 1) { + throw new FileSystemException("malformed path: " + path); + } + + String[] names = path.split(FileSystem.SEPARATOR); + for (int i = 1; i < names.length; i++) { + // name must not be empty + if (names[i].length() == 0) { + throw new FileSystemException("empty name: " + path); + } + // leading/trailing whitespace is not allowed + String trimmed = names[i].trim(); + if (!trimmed.equals(names[i])) { + throw new FileSystemException("illegal leading or trailing whitespace in name: " + path); + } + } + } + + /** + * Returns the parent directory of the specified path. + * + * @param path a file system path denoting a directory or a file. + * @return the parent directory. + */ + public static String getParentDir(String path) { + int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); + if (pos > 0) { + return path.substring(0, pos); + } + return FileSystem.SEPARATOR; + } + + /** + * Returns the name of the specified path. + * + * @param path a file system path denoting a directory or a file. + * @return the name. + */ + public static String getName(String path) { + int pos = path.lastIndexOf(FileSystem.SEPARATOR_CHAR); + if (pos != -1) { + return path.substring(pos + 1); + } + return path; + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/FileSystemResource.java (working copy) @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.core.fs.FileSystemPathUtil; + +/** + * A FileSystemResource represents a resource (i.e. file) in a + * FileSystem. + */ +public class FileSystemResource { + + protected final FileSystem fs; + + protected final String path; + + static { + // preload FileSystemPathUtil to prevent classloader issues during shutdown + FileSystemPathUtil.class.hashCode(); + } + + /** + * Creates a new FileSystemResource + * + * @param fs the FileSystem where the resource is located + * @param path the path of the resource in the FileSystem + */ + public FileSystemResource(FileSystem fs, String path) { + if (fs == null) { + throw new IllegalArgumentException("invalid file system argument"); + } + this.fs = fs; + + if (path == null) { + throw new IllegalArgumentException("invalid path argument"); + } + this.path = path; + } + + /** + * Returns the FileSystem where this resource is located. + * + * @return the FileSystem where this resource is located. + */ + public FileSystem getFileSystem() { + return fs; + } + + /** + * Returns the path of this resource. + * + * @return the path of this resource. 
+ */ + public String getPath() { + return path; + } + + /** + * Returns the parent directory of this resource. + * + * @return the parent directory. + */ + public String getParentDir() { + return FileSystemPathUtil.getParentDir(path); + } + + /** + * Returns the name of this resource. + * + * @return the name. + */ + public String getName() { + return FileSystemPathUtil.getName(path); + } + + /** + * Creates the parent directory of this resource, including any necessary + * but nonexistent parent directories. + * + * @throws FileSystemException + */ + public synchronized void makeParentDirs() throws FileSystemException { + String parentDir = getParentDir(); + if (!fs.exists(parentDir)) { + fs.createFolder(parentDir); + } + } + + /** + * Deletes this resource. + * Same as {@link #delete(boolean) delete(false)}. + * + * @see FileSystem#deleteFile + */ + public void delete() throws FileSystemException { + delete(false); + } + + /** + * Deletes this resource. + * + * @param pruneEmptyParentDirs if true, empty parent folders will + * automatically be deleted + * @see FileSystem#deleteFile + */ + public synchronized void delete(boolean pruneEmptyParentDirs) throws FileSystemException { + fs.deleteFile(path); + if (pruneEmptyParentDirs) { + // prune empty parent folders + String parentDir = FileSystemPathUtil.getParentDir(path); + while (!parentDir.equals(FileSystem.SEPARATOR) + && fs.exists(parentDir) + && !fs.hasChildren(parentDir)) { + fs.deleteFolder(parentDir); + parentDir = FileSystemPathUtil.getParentDir(parentDir); + } + } + } + + /** + * @see FileSystem#exists + */ + public boolean exists() throws FileSystemException { + return fs.exists(path); + } + + /** + * @see FileSystem#getInputStream + */ + public InputStream getInputStream() throws FileSystemException { + return fs.getInputStream(path); + } + + /** + * Spools this resource to the given output stream. 
+ * + * @param out output stream where to spool the resource + * @throws FileSystemException if the input stream for this resource could + * not be obtained + * @throws IOException if an error occurs while spooling + * @see FileSystem#getInputStream + */ + public void spool(OutputStream out) throws FileSystemException, IOException { + InputStream in = fs.getInputStream(path); + try { + IOUtils.copy(in, out); + } finally { + IOUtils.closeQuietly(in); + } + } + + /** + * @see FileSystem#getOutputStream + */ + public OutputStream getOutputStream() throws FileSystemException { + return fs.getOutputStream(path); + } + + /** + * @see FileSystem#lastModified + */ + public long lastModified() throws FileSystemException { + return fs.lastModified(path); + } + + /** + * @see FileSystem#length + */ + public long length() throws FileSystemException { + return fs.length(path); + } + + //-------------------------------------------< java.lang.Object overrides > + /** + * Returns the path string of this resource. This is just the + * string returned by the {@link #getPath} method. + * + * @return The path string of this resource + */ + public String toString() { + return getPath(); + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof FileSystemResource) { + FileSystemResource other = (FileSystemResource) obj; + return (path == null ? other.path == null : path.equals(other.path)) + && (fs == null ? other.fs == null : fs.equals(other.fs)); + } + return false; + } + + /** + * Returns zero to satisfy the Object equals/hashCode contract. + * This class is mutable and not meant to be used as a hash key. 
+ * + * @return always zero + * @see Object#hashCode() + */ + public int hashCode() { + return 0; + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/RandomAccessOutputStream.java (working copy) @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Extends the regular java.io.OutputStream with a random + * access facility. Multiple write() operations can be + * positioned off sequence with the {@link #seek} method. + * + * @deprecated this class should no longer be used + */ +public abstract class RandomAccessOutputStream extends OutputStream { + + /** + * Sets the current position in the resource where the next write + * will occur. + * + * @param position the new position in the resource. + * @throws IOException if an error occurs while seeking to the position. 
+ */ + public abstract void seek(long position) throws IOException; +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/FileUtil.java (working copy) @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.fs.local; + +import java.io.File; +import java.io.IOException; + +import org.apache.commons.io.FileUtils; + +/** + * Static utility methods for recursively copying and deleting files and + * directories. + */ +public final class FileUtil { + + /** + * private constructor + */ + private FileUtil() { + } + + /** + * Recursively copies the given file or directory to the + * given destination. 
+ * + * @param src source file or directory + * @param dest destination file or directory + * @throws IOException if the file or directory cannot be copied + */ + public static void copy(File src, File dest) throws IOException { + if (!src.canRead()) { + throw new IOException(src.getPath() + " can't be read from."); + } + if (src.isDirectory()) { + // src is a folder + if (dest.isFile()) { + throw new IOException("can't copy a folder to a file"); + } + if (!dest.exists()) { + dest.mkdirs(); + } + if (!dest.canWrite()) { + throw new IOException("can't write to " + dest.getPath()); + } + File[] children = src.listFiles(); + for (int i = 0; i < children.length; i++) { + copy(children[i], new File(dest, children[i].getName())); + } + } else { + // src is a file + File destParent; + if (dest.isDirectory()) { + // dest is a folder + destParent = dest; + dest = new File(destParent, src.getName()); + } else { + destParent = dest.getParentFile(); + } + if (!destParent.canWrite()) { + throw new IOException("can't write to " + destParent.getPath()); + } + + FileUtils.copyFile(src, dest); + } + } + + /** + * Recursively deletes the given file or directory. 
+ * + * @param f file or directory + * @throws IOException if the file or directory cannot be deleted + */ + public static void delete(File f) throws IOException { + if (f.isDirectory()) { + // it's a folder, list children first + File[] children = f.listFiles(); + for (int i = 0; i < children.length; i++) { + delete(children[i]); + } + } + if (!f.delete()) { + throw new IOException("Unable to delete " + f.getPath()); + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/HandleMonitor.java (working copy) @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.fs.local; + +import org.apache.jackrabbit.util.LazyFileInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.HashSet; + +/** + * This Class implements a very simple open handle monitor for the local + * file system. This is useful, if the list of open handles, referenced by + * an open FileInputStream() should be tracked. This can cause problems on + * windows filesystems where open files cannot be deleted. + */ +public class HandleMonitor { + + /** + * The default logger + */ + private static Logger log = LoggerFactory.getLogger(HandleMonitor.class); + + /** + * the map of open handles (key=File, value=Handle) + */ + private HashMap openHandles = new HashMap(); + + /** + * Opens a file and returns an InputStream + * + * @param file + * @return + * @throws FileNotFoundException + */ + public InputStream open(File file) throws FileNotFoundException { + Handle handle = getHandle(file); + InputStream in = handle.open(); + return in; + } + + /** + * Checks, if the file is open + * @param file + * @return + */ + public boolean isOpen(File file) { + return openHandles.containsKey(file); + } + + /** + * Closes a file + * @param file + */ + private void close(File file) { + openHandles.remove(file); + } + + /** + * Returns the handle for a file. 
+ * @param file + * @return + */ + private Handle getHandle(File file) { + Handle handle = openHandles.get(file); + if (handle == null) { + handle = new Handle(file); + openHandles.put(file, handle); + } + return handle; + } + + /** + * Dumps the contents of this monitor + */ + public void dump() { + log.info("Number of open files: " + openHandles.size()); + for (File file : openHandles.keySet()) { + Handle handle = openHandles.get(file); + handle.dump(); + } + } + + /** + * Dumps the information for a file + * @param file + */ + public void dump(File file) { + Handle handle = openHandles.get(file); + if (handle != null) { + handle.dump(true); + } + } + + /** + * Class representing all open handles to a file + */ + private class Handle { + + /** + * the file of this handle + */ + private File file; + + /** + * all open streams of this handle + */ + private HashSet streams = new HashSet(); + + /** + * Creates a new handle for a file + * @param file + */ + private Handle(File file) { + this.file = file; + } + + /** + * opens a stream for this handle + * @return + * @throws FileNotFoundException + */ + private InputStream open() throws FileNotFoundException { + Handle.MonitoredInputStream in = new Handle.MonitoredInputStream(file); + streams.add(in); + return in; + } + + /** + * Closes a stream + * @param in + */ + private void close(MonitoredInputStream in) { + streams.remove(in); + if (streams.isEmpty()) { + HandleMonitor.this.close(file); + } + } + + /** + * Dumps this handle + */ + private void dump() { + dump(false); + } + + /** + * Dumps this handle + */ + private void dump(boolean detailed) { + if (detailed) { + log.info("- " + file.getPath() + ", " + streams.size()); + for (Handle.MonitoredInputStream in : streams) { + in.dump(); + } + } else { + log.info("- " + file.getPath() + ", " + streams.size()); + } + } + + /** + * Delegating input stream that registers/unregisters itself from the + * handle. 
+ */ + private class MonitoredInputStream extends LazyFileInputStream { + + /** + * throwable of the time, the stream was created + */ + private final Throwable throwable = new Exception(); + + /** + * {@inheritDoc} + */ + private MonitoredInputStream(File file) throws FileNotFoundException { + super(file); + } + + /** + * dumps this stream + */ + private void dump() { + log.info("- opened by : ", throwable); + } + + /** + * {@inheritDoc} + */ + public void close() throws IOException { + // remove myself from the set + Handle.this.close(this); + super.close(); + } + + } + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/fs/local/LocalFileSystem.java (working copy) @@ -0,0 +1,388 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.fs.local; + +import org.apache.jackrabbit.core.fs.FileSystem; +import org.apache.jackrabbit.core.fs.FileSystemException; +import org.apache.jackrabbit.core.fs.local.FileUtil; +import org.apache.jackrabbit.core.fs.local.HandleMonitor; +import org.apache.jackrabbit.util.LazyFileInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileFilter; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * A LocalFileSystem ... + */ +public class LocalFileSystem implements FileSystem { + + private static Logger log = LoggerFactory.getLogger(LocalFileSystem.class); + + private File root; + + private HandleMonitor monitor; + + /** + * Default constructor + */ + public LocalFileSystem() { + } + + public String getPath() { + if (root != null) { + return root.getPath(); + } else { + return null; + } + } + + /** + * Sets the path to the root directory of this local filesystem. please note + * that this method can be called via reflection during initialization and + * must not be altered. + * + * @param rootPath the path to the root directory + */ + public void setPath(String rootPath) { + setRoot(new File(osPath(rootPath))); + } + + public void setRoot(File root) { + this.root = root; + } + + /** + * Enables/Disables the use of the handle monitor. + * + * @param enable + */ + public void setEnableHandleMonitor(String enable) { + setEnableHandleMonitor(Boolean.valueOf(enable).booleanValue()); + } + + /** + * Enables/Disables the use of the handle monitor. 
+ * + * @param enable flag + */ + public void setEnableHandleMonitor(boolean enable) { + if (enable && monitor == null) { + monitor = new HandleMonitor(); + } + if (!enable && monitor != null) { + monitor = null; + } + } + + /** + * Returns true if use of the handle monitor is currently + * enabled, otherwise returns false. + * + * @see #setEnableHandleMonitor(boolean) + */ + public String getEnableHandleMonitor() { + return monitor == null ? "false" : "true"; + } + + private String osPath(String genericPath) { + if (File.separator.equals(SEPARATOR)) { + return genericPath; + } + return genericPath.replace(SEPARATOR_CHAR, File.separatorChar); + } + + //-------------------------------------------< java.lang.Object overrides > + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof LocalFileSystem) { + LocalFileSystem other = (LocalFileSystem) obj; + if (root == null) { + return other.root == null; + } else { + return root.equals(other.root); + } + } + return false; + } + + /** + * Returns zero to satisfy the Object equals/hashCode contract. + * This class is mutable and not meant to be used as a hash key. 
+ * + * @return always zero + * @see Object#hashCode() + */ + public int hashCode() { + return 0; + } + + //-----------------------------------------------------------< FileSystem > + /** + * {@inheritDoc} + */ + public void init() throws FileSystemException { + if (root == null) { + String msg = "root directory not set"; + log.debug(msg); + throw new FileSystemException(msg); + } + + if (root.exists()) { + if (!root.isDirectory()) { + String msg = "path does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + } else { + if (!root.mkdirs()) { + String msg = "failed to create root"; + log.debug(msg); + throw new FileSystemException(msg); + } + } + log.info("LocalFileSystem initialized at path " + root.getPath()); + if (monitor != null) { + log.info("LocalFileSystem using handle monitor"); + } + } + + /** + * {@inheritDoc} + */ + public void close() throws FileSystemException { + root = null; + } + + /** + * {@inheritDoc} + */ + public void createFolder(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + if (f.exists()) { + String msg = f.getPath() + " already exists"; + log.debug(msg); + throw new FileSystemException(msg); + } + if (!f.mkdirs()) { + String msg = "failed to create folder " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg); + } + } + + /** + * {@inheritDoc} + */ + public void deleteFile(String filePath) throws FileSystemException { + File f = new File(root, osPath(filePath)); + if (!f.isFile()) { + String msg = f.getPath() + " does not denote an existing file"; + throw new FileSystemException(msg); + } + try { + FileUtil.delete(f); + } catch (IOException ioe) { + String msg = "failed to delete " + f.getPath(); + if (monitor != null && monitor.isOpen(f)) { + log.error("Unable to delete. 
There are still open streams."); + monitor.dump(f); + } + + throw new FileSystemException(msg, ioe); + } + } + + /** + * {@inheritDoc} + */ + public void deleteFolder(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + if (!f.isDirectory()) { + String msg = f.getPath() + " does not denote an existing folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + try { + FileUtil.delete(f); + } catch (IOException ioe) { + String msg = "failed to delete " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg, ioe); + } + } + + /** + * {@inheritDoc} + */ + public boolean exists(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.exists(); + } + + /** + * {@inheritDoc} + */ + public InputStream getInputStream(String filePath) + throws FileSystemException { + File f = new File(root, osPath(filePath)); + try { + if (monitor == null) { + return new LazyFileInputStream(f); + } else { + return monitor.open(f); + } + } catch (FileNotFoundException fnfe) { + String msg = f.getPath() + " does not denote an existing file"; + log.debug(msg); + throw new FileSystemException(msg, fnfe); + } + } + + /** + * {@inheritDoc} + */ + public OutputStream getOutputStream(String filePath) + throws FileSystemException { + File f = new File(root, osPath(filePath)); + try { + return new FileOutputStream(f); + } catch (FileNotFoundException fnfe) { + String msg = "failed to get output stream for " + f.getPath(); + log.debug(msg); + throw new FileSystemException(msg, fnfe); + } + } + + /** + * {@inheritDoc} + */ + public boolean hasChildren(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + if (!f.exists()) { + String msg = f.getPath() + " does not exist"; + log.debug(msg); + throw new FileSystemException(msg); + } + if (f.isFile()) { + return false; + } + return (f.list().length > 0); + } + + /** + * {@inheritDoc} + */ + public boolean isFile(String 
path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.isFile(); + } + + /** + * {@inheritDoc} + */ + public boolean isFolder(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.isDirectory(); + } + + /** + * {@inheritDoc} + */ + public long lastModified(String path) throws FileSystemException { + File f = new File(root, osPath(path)); + return f.lastModified(); + } + + /** + * {@inheritDoc} + */ + public long length(String filePath) throws FileSystemException { + File f = new File(root, osPath(filePath)); + if (!f.exists()) { + return -1; + } + return f.length(); + } + + /** + * {@inheritDoc} + */ + public String[] list(String folderPath) throws FileSystemException { + File f = new File(root, osPath(folderPath)); + String[] entries = f.list(); + if (entries == null) { + String msg = folderPath + " does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + return entries; + } + + /** + * {@inheritDoc} + */ + public String[] listFiles(String folderPath) throws FileSystemException { + File folder = new File(root, osPath(folderPath)); + File[] files = folder.listFiles(new FileFilter() { + public boolean accept(File f) { + return f.isFile(); + } + }); + if (files == null) { + String msg = folderPath + " does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + String[] entries = new String[files.length]; + for (int i = 0; i < files.length; i++) { + entries[i] = files[i].getName(); + } + return entries; + } + + /** + * {@inheritDoc} + */ + public String[] listFolders(String folderPath) throws FileSystemException { + File file = new File(root, osPath(folderPath)); + File[] folders = file.listFiles(new FileFilter() { + public boolean accept(File f) { + return f.isDirectory(); + } + }); + if (folders == null) { + String msg = folderPath + " does not denote a folder"; + log.debug(msg); + throw new FileSystemException(msg); + } + String[] 
entries = new String[folders.length]; + for (int i = 0; i < folders.length; i++) { + entries[i] = folders[i].getName(); + } + return entries; + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/CheckSchemaOperation.java (working copy) @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.io.IOUtils; +import org.apache.jackrabbit.util.Text; + +/** + * An operation which synchronously checks the DB schema in the {@link #run()} method. The + * {@link #addVariableReplacement(String, String)} method return the instance to enable method chaining. 
+ */ +public class CheckSchemaOperation { + + public static final String SCHEMA_OBJECT_PREFIX_VARIABLE = "${schemaObjectPrefix}"; + + public static final String TABLE_SPACE_VARIABLE = "${tableSpace}"; + + private final ConnectionHelper conHelper; + + private final InputStream ddl; + + private final String table; + + private final Map varReplacement = new HashMap(); + + /** + * @param connectionhelper the connection helper + * @param ddlStream the stream of the DDL to use to create the schema if necessary (closed by the + * {@link #run()} method) + * @param tableName the name of the table to use for the schema-existence-check + */ + public CheckSchemaOperation(ConnectionHelper connectionhelper, InputStream ddlStream, String tableName) { + conHelper = connectionhelper; + ddl = ddlStream; + table = tableName; + } + + /** + * Adds a variable replacement mapping. + * + * @param var the variable + * @param replacement the replacement value + * @return this + */ + public CheckSchemaOperation addVariableReplacement(String var, String replacement) { + varReplacement.put(var, replacement); + return this; + } + + /** + * Checks if the required schema objects exist and creates them if they don't exist yet. + * + * @throws SQLException if an error occurs + * @throws IOException if an error occurs + */ + public void run() throws SQLException, IOException { + try { + if (!conHelper.tableExists(table)) { + BufferedReader reader = new BufferedReader(new InputStreamReader(ddl)); + String sql = reader.readLine(); + while (sql != null) { + // Skip comments and empty lines + if (!sql.startsWith("#") && sql.length() > 0) { + // replace prefix variable + sql = replace(sql); + // execute sql stmt + conHelper.exec(sql); + } + // read next sql stmt + sql = reader.readLine(); + } + } + } finally { + IOUtils.closeQuietly(ddl); + } + } + + /** + * Applies the variable replacement to the given string. 
+ * + * @param sql the string in which to replace variables + * @return the new string + */ + private String replace(String sql) { + String result = sql; + for (Map.Entry entry : varReplacement.entrySet()) { + result = Text.replace(result, entry.getKey(), entry.getValue()).trim(); + } + return result; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionFactory.java (working copy) @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.util.db; + +import java.sql.Connection; +import java.sql.Driver; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.jcr.RepositoryException; +import javax.naming.Context; +import javax.naming.NamingException; +import javax.sql.DataSource; + +import org.apache.commons.dbcp.BasicDataSource; +import org.apache.commons.dbcp.DelegatingConnection; +import org.apache.commons.pool.impl.GenericObjectPool; +import org.apache.jackrabbit.core.config.DataSourceConfig; +import org.apache.jackrabbit.core.config.DataSourceConfig.DataSourceDefinition; +import org.apache.jackrabbit.core.util.db.DataSourceWrapper; +import org.apache.jackrabbit.util.Base64; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A factory for new database connections. + * Supported are regular JDBC drivers, as well as + * JNDI resources. + * + * FIXME: the registry currently is ClassLoader wide. I.e., if you start two repositories + * then you share the registered datasources... + */ +public final class ConnectionFactory { + + private static final Logger log = LoggerFactory.getLogger(ConnectionFactory.class); + + /** + * The lock to protect the fields of this class. + */ + private final Object lock = new Object(); + + /** + * The data sources without logical name. The keys in the map are based on driver-url-user combination. + */ + private final Map keyToDataSource = new HashMap(); + + /** + * The configured data sources with logical name. The keys in the map are the logical name. + */ + private final Map nameToDataSource = new HashMap(); + + /** + * The configured data source defs. The keys in the map are the logical name. + */ + private final Map nameToDataSourceDef = new HashMap(); + + /** + * The list of data sources created by this factory. 
+ */ + private final List created = new ArrayList(); + + private boolean closed = false; + + /** + * Registers a number of data sources. + * + * @param dsc the {@link DataSourceConfig} which contains the configuration + */ + public void registerDataSources(DataSourceConfig dsc) throws RepositoryException { + synchronized (lock) { + sanityCheck(); + for (DataSourceDefinition def : dsc.getDefinitions()) { + Class driverClass = getDriverClass(def.getDriver()); + if (driverClass != null + && Context.class.isAssignableFrom(driverClass)) { + DataSource ds = getJndiDataSource((Class) driverClass, def.getUrl()); + nameToDataSource.put(def.getLogicalName(), ds); + nameToDataSourceDef.put(def.getLogicalName(), def); + } else { + BasicDataSource bds = + getDriverDataSource(driverClass, def.getUrl(), def.getUser(), def.getPassword()); + if (def.getMaxPoolSize() > 0) { + bds.setMaxActive(def.getMaxPoolSize()); + } + if (def.getValidationQuery() != null && !"".equals(def.getValidationQuery().trim())) { + bds.setValidationQuery(def.getValidationQuery()); + } + nameToDataSource.put(def.getLogicalName(), bds); + nameToDataSourceDef.put(def.getLogicalName(), def); + } + } + } + } + + /** + * Retrieves a configured data source by logical name. 
+ * + * @param logicalName the name of the {@code DataSource} + * @return a {@code DataSource} + * @throws RepositoryException if there is no {@code DataSource} with the given name + */ + public DataSource getDataSource(String logicalName) throws RepositoryException { + synchronized (lock) { + sanityCheck(); + DataSource ds = nameToDataSource.get(logicalName); + if (ds == null) { + throw new RepositoryException("DataSource with logicalName " + logicalName + + " has not been configured"); + } + return ds; + } + } + + /** + * @param logicalName the name of the {@code DataSource} + * @return the configured database type + * @throws RepositoryException if there is no {@code DataSource} with the given name + */ + public String getDataBaseType(String logicalName) throws RepositoryException { + synchronized (lock) { + sanityCheck(); + DataSourceDefinition def = nameToDataSourceDef.get(logicalName); + if (def == null) { + throw new RepositoryException("DataSource with logicalName " + logicalName + + " has not been configured"); + } + return def.getDbType(); + } + } + + /** + * Retrieve a {@code DataSource} for the specified properties. + * This can be a JNDI Data Source as well. To do that, + * the driver class name must reference a {@code javax.naming.Context} class + * (for example {@code javax.naming.InitialContext}), and the URL must be the JNDI URL + * (for example {@code java:comp/env/jdbc/Test}). 
+ * + * @param driver the JDBC driver or the Context class + * @param url the database URL + * @param user the user name + * @param password the password + * @return the {@code DataSource} + * @throws RepositoryException if the driver could not be loaded + * @throws SQLException if the connection could not be established + */ + public DataSource getDataSource(String driver, String url, String user, String password) + throws RepositoryException, SQLException { + final String key = driver + url + user; + synchronized(lock) { + sanityCheck(); + DataSource ds = keyToDataSource.get(key); + if (ds == null) { + ds = createDataSource( + driver, url, user, Base64.decodeIfEncoded(password)); + keyToDataSource.put(key, ds); + } + return ds; + } + } + + /** + * + */ + public void close() { + synchronized(lock) { + sanityCheck(); + for (BasicDataSource ds : created) { + try { + ds.close(); + } catch (SQLException e) { + log.error("failed to close " + ds, e); + } + } + keyToDataSource.clear(); + nameToDataSource.clear(); + nameToDataSourceDef.clear(); + created.clear(); + closed = true; + } + } + + /** + * Needed for pre-10R2 Oracle blob support....:( + * + * This method actually assumes that we are using commons DBCP 1.2.2. + * + * @param con the commons-DBCP {@code DelegatingConnection} to unwrap + * @return the unwrapped connection + */ + public static Connection unwrap(Connection con) throws SQLException { + if (con instanceof DelegatingConnection) { + return ((DelegatingConnection)con).getInnermostDelegate(); + } else { + throw new SQLException("failed to unwrap connection of class " + con.getClass().getName() + + ", expected it to be a " + DelegatingConnection.class.getName()); + } + } + + private void sanityCheck() { + if (closed) { + throw new IllegalStateException("this factory has already been closed"); + } + } + + /** + * Create a new pooling data source or finds an existing JNDI data source (depends on driver). 
+ * + * @param driver + * @param url + * @param user + * @param password + * @return + * @throws RepositoryException + */ + private DataSource createDataSource(String driver, String url, String user, String password) + throws RepositoryException { + Class driverClass = getDriverClass(driver); + if (driverClass != null + && Context.class.isAssignableFrom(driverClass)) { + @SuppressWarnings("unchecked") + DataSource database = getJndiDataSource((Class) driverClass, url); + if (user == null && password == null) { + return database; + } else { + return new DataSourceWrapper(database, user, password); + } + } else { + return getDriverDataSource(driverClass, url, user, password); + } + } + + /** + * Loads and returns the given JDBC driver (or JNDI context) class. + * Returns null if a class name is not given. + * + * @param driver driver class name + * @return driver class, or null + * @throws RepositoryException if the class can not be loaded + */ + private Class getDriverClass(String driver) + throws RepositoryException { + try { + if (driver != null && driver.length() > 0) { + return Class.forName(driver); + } else { + return null; + } + } catch (ClassNotFoundException e) { + throw new RepositoryException( + "Could not load JDBC driver class " + driver, e); + } + } + + /** + * Returns the JDBC {@link DataSource} bound to the given name in + * the JNDI {@link Context} identified by the given class. 
+ * + * @param contextClass class that is instantiated to get the JNDI context + * @param name name of the DataSource within the JNDI context + * @return the DataSource bound in JNDI + * @throws RepositoryException if the JNDI context can not be accessed, + * or if the named DataSource is not found + */ + private DataSource getJndiDataSource( + Class contextClass, String name) + throws RepositoryException { + try { + Object object = contextClass.newInstance().lookup(name); + if (object instanceof DataSource) { + return (DataSource) object; + } else { + throw new RepositoryException( + "Object " + object + " with JNDI name " + + name + " is not a JDBC DataSource"); + } + } catch (InstantiationException e) { + throw new RepositoryException( + "Invalid JNDI context: " + contextClass.getName(), e); + } catch (IllegalAccessException e) { + throw new RepositoryException( + "Invalid JNDI context: " + contextClass.getName(), e); + } catch (NamingException e) { + throw new RepositoryException( + "JNDI name not found: " + name, e); + } + } + + /** + * Creates and returns a pooling JDBC {@link DataSource} for accessing + * the database identified by the given driver class and JDBC + * connection URL. The driver class can be null if + * a specific driver has not been configured. 
+ * + * @param driverClass the JDBC driver class, or null + * @param url the JDBC connection URL + * @return pooling DataSource for accessing the specified database + */ + private BasicDataSource getDriverDataSource( + Class driverClass, String url, String user, String password) { + BasicDataSource ds = new BasicDataSource(); + created.add(ds); + + if (driverClass != null) { + Driver instance = null; + try { + // Workaround for Apache Derby: + // The JDBC specification recommends the Class.forName + // method without the .newInstance() method call, + // but it is required after a Derby 'shutdown' + instance = (Driver) driverClass.newInstance(); + } catch (Throwable e) { + // Ignore exceptions as there's no requirement for + // a JDBC driver class to have a public default constructor + } + if (instance != null) { + if (instance.jdbcCompliant()) { + // JCR-3445 At the moment the PostgreSQL isn't compliant because it doesn't implement this method... + ds.setValidationQueryTimeout(3); + } + } + ds.setDriverClassName(driverClass.getName()); + } + + ds.setUrl(url); + ds.setUsername(user); + ds.setPassword(password); + ds.setDefaultAutoCommit(true); + ds.setTestOnBorrow(false); + ds.setTestWhileIdle(true); + ds.setTimeBetweenEvictionRunsMillis(600000); // 10 Minutes + ds.setMinEvictableIdleTimeMillis(60000); // 1 Minute + ds.setMaxActive(-1); // unlimited + ds.setMaxIdle(GenericObjectPool.DEFAULT_MAX_IDLE + 10); + ds.setValidationQuery(guessValidationQuery(url)); + ds.setAccessToUnderlyingConnectionAllowed(true); + ds.setPoolPreparedStatements(true); + ds.setMaxOpenPreparedStatements(-1); // unlimited + return ds; + } + + private String guessValidationQuery(String url) { + if (url.contains("derby")) { + return "values(1)"; + } else if (url.contains("mysql")) { + return "select 1"; + } else if (url.contains("sqlserver") || url.contains("jtds")) { + return "select 1"; + } else if (url.contains("oracle")) { + return "select 'validationQuery' from dual"; + } else if 
(url.contains("postgresql")) { + return "select 1"; + } else if (url.contains("h2")) { + return "select 1"; + } else if (url.contains("db2")) { + return "values(1)"; + } + log.warn("Failed to guess validation query for URL " + url); + return null; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ConnectionHelper.java (working copy) @@ -0,0 +1,600 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.jackrabbit.core.util.db; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import javax.sql.DataSource; + +import org.apache.jackrabbit.core.util.db.Oracle10R1ConnectionHelper; +import org.apache.jackrabbit.core.util.db.ResultSetWrapper; +import org.apache.jackrabbit.data.core.TransactionContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class provides convenience methods to execute SQL statements. They can be either executed in isolation + * or within the context of a JDBC transaction; the so-called batch mode (use the {@link #startBatch()} + * and {@link #endBatch(boolean)} methods for this). + * + *

+ * + * This class contains logic to retry execution of SQL statements. If this helper is not in batch mode + * and if a statement fails due to an {@code SQLException}, then it is retried. If the {@code block} argument + * of the constructor call was {@code false} then it is retried only once. Otherwise the statement is retried + * until either it succeeds or the thread is interrupted. This clearly assumes that the only cause of {@code + * SQLExceptions} is faulty {@code Connections} which are restored eventually.
Note: + * This retry logic only applies to the following methods: + *
+ * <ul>
+ * <li>{@link #exec(String, Object...)}</li>
+ * <li>{@link #update(String, Object[])}</li>
+ * <li>{@link #exec(String, Object[], boolean, int)}</li>
+ * </ul>
+ *

+ * + * This class is not thread-safe and if it is to be used by multiple threads then the clients must make sure + * that access to this class is properly synchronized. + * + *

+ * + * Implementation note: The {@code Connection} that is retrieved from the {@code DataSource} + * in {@link #getConnection()} may be broken. This is so because if an internal {@code DataSource} is used, + * then this is a commons-dbcp {@code DataSource} with a testWhileIdle validation strategy (see + * the {@code ConnectionFactory} class). Furthermore, if it is a {@code DataSource} obtained through JNDI then we + * can make no assumptions about the validation strategy. This means that our retry logic must either assume that + * the SQL it tries to execute can do so without errors (i.e., the statement is valid), or it must implement its + * own validation strategy to apply. Currently, the former is in place. + */ +public class ConnectionHelper { + + static Logger log = LoggerFactory.getLogger(ConnectionHelper.class); + + private static final int RETRIES = 1; + + private static final int SLEEP_BETWEEN_RETRIES_MS = 100; + + final boolean blockOnConnectionLoss; + + private final boolean checkTablesWithUserName; + + protected final DataSource dataSource; + + private Map batchConnectionMap = Collections.synchronizedMap(new HashMap()); + + /** + * The default fetchSize is '0'. 
This means the fetchSize Hint will be ignored + */ + private int fetchSize = 0; + + /** + * @param dataSrc the {@link DataSource} on which this instance acts + * @param block whether the helper should transparently block on DB connection loss (otherwise it retries + * once and if that fails throws exception) + */ + public ConnectionHelper(DataSource dataSrc, boolean block) { + dataSource = dataSrc; + checkTablesWithUserName = false; + blockOnConnectionLoss = block; + } + + /** + * @param dataSrc the {@link DataSource} on which this instance acts + * @param checkWithUserName whether the username is to be used for the {@link #tableExists(String)} method + * @param block whether the helper should transparently block on DB connection loss (otherwise it throws exceptions) + */ + protected ConnectionHelper(DataSource dataSrc, boolean checkWithUserName, boolean block) { + dataSource = dataSrc; + checkTablesWithUserName = checkWithUserName; + blockOnConnectionLoss = block; + } + + /** + * @param dataSrc the {@link DataSource} on which this instance acts + * @param checkWithUserName whether the username is to be used for the {@link #tableExists(String)} method + * @param block whether the helper should transparently block on DB connection loss (otherwise it throws exceptions) + * @param fetchSize the fetchSize that will be used per default + */ + protected ConnectionHelper(DataSource dataSrc, boolean checkWithUserName, boolean block, int fetchSize) { + dataSource = dataSrc; + checkTablesWithUserName = checkWithUserName; + blockOnConnectionLoss = block; + this.fetchSize = fetchSize; + } + + /** + * A utility method that makes sure that identifier does only consist of characters that are + * allowed in names on the target database. Illegal characters will be escaped as necessary. 
+ * + * This method is not affected by the + * + * @param identifier the identifier to convert to a db specific identifier + * @return the db-normalized form of the given identifier + * @throws SQLException if an error occurs + */ + public final String prepareDbIdentifier(String identifier) throws SQLException { + if (identifier == null) { + return null; + } + String legalChars = "ABCDEFGHIJKLMNOPQRSTUVWXZY0123456789_"; + legalChars += getExtraNameCharacters(); + String id = identifier.toUpperCase(); + StringBuilder escaped = new StringBuilder(); + for (int i = 0; i < id.length(); i++) { + char c = id.charAt(i); + if (legalChars.indexOf(c) == -1) { + replaceCharacter(escaped, c); + } else { + escaped.append(c); + } + } + return escaped.toString(); + } + + /** + * Called from {@link #prepareDbIdentifier(String)}. Default implementation replaces the illegal + * characters with their hexadecimal encoding. + * + * @param escaped the escaped db identifier + * @param c the character to replace + */ + protected void replaceCharacter(StringBuilder escaped, char c) { + escaped.append("_x"); + String hex = Integer.toHexString(c); + escaped.append("0000".toCharArray(), 0, 4 - hex.length()); + escaped.append(hex); + escaped.append("_"); + } + + /** + * Returns true if we are currently in a batch mode, false otherwise. + * + * @return true if the current thread or the active transaction is running in batch mode, false otherwise. + */ + protected boolean inBatchMode() { + return getTransactionAwareBatchConnection() != null; + } + + /** + * The default implementation returns the {@code extraNameCharacters} provided by the databases metadata. 
+ * + * @return the additional characters for identifiers supported by the db + * @throws SQLException on error + */ + private String getExtraNameCharacters() throws SQLException { + Connection con = dataSource.getConnection(); + try { + DatabaseMetaData metaData = con.getMetaData(); + return metaData.getExtraNameCharacters(); + } finally { + DbUtility.close(con, null, null); + } + } + + /** + * Checks whether the given table exists in the database. + * + * @param tableName the name of the table + * @return whether the given table exists + * @throws SQLException on error + */ + public final boolean tableExists(String tableName) throws SQLException { + Connection con = dataSource.getConnection(); + ResultSet rs = null; + boolean schemaExists = false; + String name = tableName; + try { + DatabaseMetaData metaData = con.getMetaData(); + if (metaData.storesLowerCaseIdentifiers()) { + name = tableName.toLowerCase(); + } else if (metaData.storesUpperCaseIdentifiers()) { + name = tableName.toUpperCase(); + } + String userName = null; + if (checkTablesWithUserName) { + userName = metaData.getUserName(); + } + rs = metaData.getTables(null, userName, name, null); + schemaExists = rs.next(); + } finally { + DbUtility.close(con, null, rs); + } + return schemaExists; + } + + /** + * Starts the batch mode. If an {@link SQLException} is thrown, then the batch mode is not started.

+ * Important: clients that call this method must make sure that + * {@link #endBatch(boolean)} is called eventually. + * + * @throws SQLException on error + */ + public final void startBatch() throws SQLException { + if (inBatchMode()) { + throw new SQLException("already in batch mode"); + } + Connection batchConnection = null; + try { + batchConnection = getConnection(false); + batchConnection.setAutoCommit(false); + setTransactionAwareBatchConnection(batchConnection); + } catch (SQLException e) { + removeTransactionAwareBatchConnection(); + // Strive for failure atomicity + if (batchConnection != null) { + DbUtility.close(batchConnection, null, null); + } + throw e; + } + } + + /** + * This method always ends the batch mode. + * + * @param commit whether the changes in the batch should be committed or rolled back + * @throws SQLException if the commit or rollback of the underlying JDBC Connection threw an {@code + * SQLException} + */ + public final void endBatch(boolean commit) throws SQLException { + if (!inBatchMode()) { + throw new SQLException("not in batch mode"); + } + Connection batchConnection = getTransactionAwareBatchConnection(); + try { + if (commit) { + batchConnection.commit(); + } else { + batchConnection.rollback(); + } + } finally { + removeTransactionAwareBatchConnection(); + if (batchConnection != null) { + DbUtility.close(batchConnection, null, null); + } + } + } + + /** + * Executes a general SQL statement and immediately closes all resources. + * + * Note: We use a Statement if there are no parameters to avoid a problem on + * the Oracle 10g JDBC driver w.r.t. :NEW and :OLD keywords that triggers ORA-17041. + * + * @param sql an SQL statement string + * @param params the parameters for the SQL statement + * @throws SQLException on error + */ + public final void exec(final String sql, final Object... 
params) throws SQLException { + new RetryManager(params) { + + @Override + protected Void call() throws SQLException { + reallyExec(sql, params); + return null; + } + + }.doTry(); + } + + void reallyExec(String sql, Object... params) throws SQLException { + Connection con = null; + Statement stmt = null; + boolean inBatchMode = inBatchMode(); + try { + con = getConnection(inBatchMode); + if (params == null || params.length == 0) { + stmt = con.createStatement(); + stmt.execute(sql); + } else { + PreparedStatement p = con.prepareStatement(sql); + stmt = p; + execute(p, params); + } + } finally { + closeResources(con, stmt, null, inBatchMode); + } + } + + /** + * Executes an update or delete statement and returns the update count. + * + * @param sql an SQL statement string + * @param params the parameters for the SQL statement + * @return the update count + * @throws SQLException on error + */ + public final int update(final String sql, final Object... params) throws SQLException { + return new RetryManager(params) { + + @Override + protected Integer call() throws SQLException { + return reallyUpdate(sql, params); + } + + }.doTry(); + } + + int reallyUpdate(String sql, Object... params) throws SQLException { + Connection con = null; + PreparedStatement stmt = null; + boolean inBatchMode = inBatchMode(); + try { + con = getConnection(inBatchMode); + stmt = con.prepareStatement(sql); + return execute(stmt, params).getUpdateCount(); + } finally { + closeResources(con, stmt, null, inBatchMode); + } + } + + /** + * Executes a SQL query and returns the {@link ResultSet}. The + * returned {@link ResultSet} should be closed by clients. + * + * @param sql an SQL statement string + * @param params the parameters for the SQL statement + * @return a {@link ResultSet} + */ + public final ResultSet query(String sql, Object... 
params) throws SQLException { + return exec(sql, params, false, 0); + } + + /** + * Executes a general SQL statement and returns the {@link ResultSet} of the executed statement. The + * returned {@link ResultSet} should be closed by clients. + * + * @param sql an SQL statement string + * @param params the parameters for the SQL statement + * @param returnGeneratedKeys whether generated keys should be returned + * @param maxRows the maximum number of rows in a potential {@link ResultSet} (0 means no limit) + * @return a {@link ResultSet} + * @throws SQLException on error + */ + public final ResultSet exec(final String sql, final Object[] params, final boolean returnGeneratedKeys, + final int maxRows) throws SQLException { + return new RetryManager(params) { + + @Override + protected ResultSet call() throws SQLException { + return reallyExec(sql, params, returnGeneratedKeys, maxRows); + } + + }.doTry(); + } + + ResultSet reallyExec(String sql, Object[] params, boolean returnGeneratedKeys, int maxRows) + throws SQLException { + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + boolean inBatchMode = inBatchMode(); + try { + con = getConnection(inBatchMode); + if (returnGeneratedKeys) { + stmt = con.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS); + } else { + stmt = con.prepareStatement(sql); + } + stmt.setMaxRows(maxRows); + int currentFetchSize = this.fetchSize; + if (0 < maxRows && maxRows < currentFetchSize) { + currentFetchSize = maxRows; // JCR-3090 + } + stmt.setFetchSize(currentFetchSize); + execute(stmt, params); + if (returnGeneratedKeys) { + rs = stmt.getGeneratedKeys(); + } else { + rs = stmt.getResultSet(); + } + // Don't wrap null + if (rs == null) { + closeResources(con, stmt, rs, inBatchMode); + return null; + } + if (inBatchMode) { + return ResultSetWrapper.newInstance(null, stmt, rs); + } else { + return ResultSetWrapper.newInstance(con, stmt, rs); + } + } catch (SQLException e) { + closeResources(con, stmt, rs, 
inBatchMode); + throw e; + } + } + + /** + * Gets a connection based on the {@code batchMode} state of this helper. The connection should be closed + * by a call to {@link #closeResources(Connection, Statement, ResultSet)} which also takes the {@code + * batchMode} state into account. + * + * @param inBatchMode indicates if we are in a batchMode + * @return a {@code Connection} to use, based on the batch mode state + * @throws SQLException on error + */ + protected final Connection getConnection(boolean inBatchMode) throws SQLException { + if (inBatchMode) { + return getTransactionAwareBatchConnection(); + } else { + Connection con = dataSource.getConnection(); + // JCR-1013: Setter may fail unnecessarily on a managed connection + if (!con.getAutoCommit()) { + con.setAutoCommit(true); + } + return con; + } + } + + /** + * Returns the Batch Connection. + * + * @return Connection + */ + private Connection getTransactionAwareBatchConnection() { + Object threadId = TransactionContext.getCurrentThreadId(); + return batchConnectionMap.get(threadId); + } + + /** + * Stores the given Connection to the batchConnectionMap. + * If we are running in a XA Environment the globalTransactionId will be used as Key. + * In Non-XA Environment the ThreadName is used. + * + * @param batchConnection + */ + private void setTransactionAwareBatchConnection(Connection batchConnection) { + Object threadId = TransactionContext.getCurrentThreadId(); + batchConnectionMap.put(threadId, batchConnection); + } + + /** + * Removes the Batch Connection from the batchConnectionMap + */ + private void removeTransactionAwareBatchConnection() { + Object threadId = TransactionContext.getCurrentThreadId(); + batchConnectionMap.remove(threadId); + } + + /** + * Closes the given resources given the {@code batchMode} state. 
+ * + * @param con the {@code Connection} obtained through the {@link #getConnection()} method + * @param stmt a {@code Statement} + * @param rs a {@code ResultSet} + * @param inBatchMode indicates if we are in a batchMode + */ + protected final void closeResources(Connection con, Statement stmt, ResultSet rs, boolean inBatchMode) { + if (inBatchMode) { + DbUtility.close(null, stmt, rs); + } else { + DbUtility.close(con, stmt, rs); + } + } + + /** + * This method is used by all methods of this class that execute SQL statements. This default + * implementation sets all parameters and unwraps {@link StreamWrapper} instances. Subclasses may override + * this method to do something special with the parameters. E.g., the {@link Oracle10R1ConnectionHelper} + * overrides it in order to add special blob handling. + * + * @param stmt the {@link PreparedStatement} to execute + * @param params the parameters + * @return the executed statement + * @throws SQLException on error + */ + protected PreparedStatement execute(PreparedStatement stmt, Object[] params) throws SQLException { + for (int i = 0; params != null && i < params.length; i++) { + Object p = params[i]; + if (p instanceof StreamWrapper) { + StreamWrapper wrapper = (StreamWrapper) p; + stmt.setBinaryStream(i + 1, wrapper.getStream(), (int) wrapper.getSize()); + } else { + stmt.setObject(i + 1, p); + } + } + try { + stmt.execute(); + } catch (SQLException e) { + //Reset Stream for retry ... + for (int i = 0; params != null && i < params.length; i++) { + Object p = params[i]; + if (p instanceof StreamWrapper) { + StreamWrapper wrapper = (StreamWrapper) p; + if(!wrapper.resetStream()) { + wrapper.cleanupResources(); + throw new RuntimeException("Unable to reset the Stream."); + } + } + } + throw e; + } + return stmt; + } + + /** + * This class encapsulates the logic to retry a method invocation if it threw an SQLException. + * The RetryManager must cleanup the Params it will get. 
+ * + * @param the return type of the method which is retried if it failed + */ + public abstract class RetryManager { + + private Object[] params; + + public RetryManager(Object[] params) { + this.params = params; + } + + public final T doTry() throws SQLException { + if (inBatchMode()) { + return call(); + } else { + boolean sleepInterrupted = false; + int failures = 0; + SQLException lastException = null; + while (!sleepInterrupted && (blockOnConnectionLoss || failures <= RETRIES)) { + try { + T object = call(); + cleanupParamResources(); + return object; + } catch (SQLException e) { + lastException = e; + } + log.error("Failed to execute SQL (stacktrace on DEBUG log level): " + lastException); + log.debug("Failed to execute SQL", lastException); + failures++; + if (blockOnConnectionLoss || failures <= RETRIES) { // if we're going to try again + try { + Thread.sleep(SLEEP_BETWEEN_RETRIES_MS); + } catch (InterruptedException e1) { + Thread.currentThread().interrupt(); + sleepInterrupted = true; + log.error("Interrupted: canceling retry"); + } + } + } + cleanupParamResources(); + throw lastException; + } + } + + protected abstract T call() throws SQLException; + + /** + * Cleans up the Parameter resources that are not automatically closed or deleted. 
+ * + * @param params + */ + protected void cleanupParamResources() { + for (int i = 0; params != null && i < params.length; i++) { + Object p = params[i]; + if (p instanceof StreamWrapper) { + StreamWrapper wrapper = (StreamWrapper) p; + wrapper.cleanupResources(); + } + } + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DataSourceWrapper.java (working copy) @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.logging.Logger; + +import javax.sql.DataSource; + +/** + * This class delegates all calls to the corresponding method on the wrapped {@code DataSource} except for the {@link #getConnection()} method, + * which delegates to {@code DataSource#getConnection(String, String)} with the username and password + * which are given on construction. 
+ */ +public class DataSourceWrapper implements DataSource { + + private final DataSource dataSource; + + private final String username; + + private final String password; + + /** + * @param dataSource the {@code DataSource} to wrap + * @param username the username to use + * @param password the password to use + */ + public DataSourceWrapper(DataSource dataSource, String username, String password) { + this.dataSource = dataSource; + this.username = username; + this.password = password; + } + + /** + * Java 6 method. + * + * {@inheritDoc} + */ + public boolean isWrapperFor(Class arg0) throws SQLException { + throw new UnsupportedOperationException("Java 6 method not supported"); + } + + /** + * Java 6 method. + * + * {@inheritDoc} + */ + public T unwrap(Class arg0) throws SQLException { + throw new UnsupportedOperationException("Java 6 method not supported"); + } + + /** + * Unsupported Java 7 method. + * + * @see JCR-3167 + */ + public Logger getParentLogger() { + throw new UnsupportedOperationException("Java 7 method not supported"); + } + + /** + * {@inheritDoc} + */ + public Connection getConnection() throws SQLException { + return dataSource.getConnection(username, password); + } + + /** + * {@inheritDoc} + */ + public Connection getConnection(String username, String password) throws SQLException { + return dataSource.getConnection(username, password); + } + + /** + * {@inheritDoc} + */ + public PrintWriter getLogWriter() throws SQLException { + return dataSource.getLogWriter(); + } + + /** + * {@inheritDoc} + */ + public int getLoginTimeout() throws SQLException { + return dataSource.getLoginTimeout(); + } + + /** + * {@inheritDoc} + */ + public void setLogWriter(PrintWriter out) throws SQLException { + dataSource.setLogWriter(out); + } + + /** + * {@inheritDoc} + */ + public void setLoginTimeout(int seconds) throws SQLException { + dataSource.setLoginTimeout(seconds); + } + +} Index: 
jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DatabaseAware.java (working copy) @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +/** + * Bean components (i.e., classes that appear in the repository descriptor) that implement this interface will + * get the repositories {@link ConnectionFactory} instance injected just after construction and before + * initialization. 
+ */ +public interface DatabaseAware { + + /** + * @param connectionFactory + */ + void setConnectionFactory(ConnectionFactory connectionFactory); +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DbUtility.java (working copy) @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class contains some database utility methods. + */ +public final class DbUtility { + + private static final Logger LOG = LoggerFactory.getLogger(DbUtility.class); + + /** + * Private constructor for utility class pattern. + */ + private DbUtility() { + } + + /** + * This is a utility method which closes the given resources without throwing exceptions. Any exceptions + * encountered are logged instead. 
+ * + * @param rs the {@link ResultSet} to close, may be null + */ + public static void close(ResultSet rs) { + close(null, null, rs); + } + + /** + * This is a utility method which closes the given resources without throwing exceptions. Any exceptions + * encountered are logged instead. + * + * @param con the {@link Connection} to close, may be null + * @param stmt the {@link Statement} to close, may be null + * @param rs the {@link ResultSet} to close, may be null + */ + public static void close(Connection con, Statement stmt, ResultSet rs) { + try { + if (rs != null) { + rs.close(); + } + } catch (SQLException e) { + logException("failed to close ResultSet", e); + } finally { + try { + if (stmt != null) { + stmt.close(); + } + } catch (SQLException e) { + logException("failed to close Statement", e); + } finally { + try { + if (con != null && !con.isClosed()) { + con.close(); + } + } catch (SQLException e) { + logException("failed to close Connection", e); + } + } + } + } + + /** + * Logs an SQL exception on error level, and debug level (more detail). + * + * @param message the message + * @param e the exception + */ + public static void logException(String message, SQLException e) { + if (message != null) { + LOG.error(message); + } + LOG.error(" Reason: " + e.getMessage()); + LOG.error(" State/Code: " + e.getSQLState() + "/" + e.getErrorCode()); + LOG.debug(" dump:", e); + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/DerbyConnectionHelper.java (working copy) @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; + +import javax.sql.DataSource; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + */ +public final class DerbyConnectionHelper extends ConnectionHelper { + + /** name of the embedded driver */ + public static final String DERBY_EMBEDDED_DRIVER = "org.apache.derby.jdbc.EmbeddedDriver"; + + private static Logger log = LoggerFactory.getLogger(DerbyConnectionHelper.class); + + /** + * @param dataSrc the {@code DataSource} on which this helper acts + * @param block whether to block on connection loss until the db is up again + */ + public DerbyConnectionHelper(DataSource dataSrc, boolean block) { + super(dataSrc, block); + } + + /** + * Shuts the embedded Derby database down. 
+ * + * @param driver the driver + * @throws SQLException on failure + */ + public void shutDown(String driver) throws SQLException { + // check for embedded driver + if (!DERBY_EMBEDDED_DRIVER.equals(driver)) { + return; + } + + // prepare connection url for issuing shutdown command + String url = null; + Connection con = null; + + try { + con = dataSource.getConnection(); + try { + url = con.getMetaData().getURL(); + } catch (SQLException e) { + // JCR-1557: embedded derby db probably already shut down; + // this happens when configuring multiple FS/PM instances + // to use the same embedded derby db instance. + log.debug("failed to retrieve connection url: embedded db probably already shut down", e); + return; + } + // we have to reset the connection to 'autoCommit=true' before closing it; + // otherwise Derby would mysteriously complain about some pending uncommitted + // changes which can't possibly be true. + // @todo further investigate + con.setAutoCommit(true); + } + finally { + DbUtility.close(con, null, null); + } + int pos = url.lastIndexOf(';'); + if (pos != -1) { + // strip any attributes from connection url + url = url.substring(0, pos); + } + url += ";shutdown=true"; + + // now it's safe to shutdown the embedded Derby database + try { + DriverManager.getConnection(url); + } catch (SQLException e) { + // a shutdown command always raises a SQLException + log.info(e.getMessage()); + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/Oracle10R1ConnectionHelper.java (working copy) @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.reflect.Method; +import java.sql.Blob; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +import javax.sql.DataSource; + +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The connection helper for Oracle databases of version up to 10.1. It has special blob handling. 
+ */ +public final class Oracle10R1ConnectionHelper extends OracleConnectionHelper { + + /** + * the default logger + */ + private static Logger log = LoggerFactory.getLogger(Oracle10R1ConnectionHelper.class); + + private Class blobClass; + + private Integer durationSessionConstant; + + private Integer modeReadWriteConstant; + + /** + * @param dataSrc the {@code DataSource} on which this helper acts + * @param block whether to block on connection loss until the db is up again + */ + public Oracle10R1ConnectionHelper(DataSource dataSrc, boolean block) { + super(dataSrc, block); + } + + /** + * Retrieve the oracle.sql.BLOB class via reflection, and initialize the values for the + * DURATION_SESSION and MODE_READWRITE constants defined there. + * + * @see oracle.sql.BLOB#DURATION_SESSION + * @see oracle.sql.BLOB#MODE_READWRITE + */ + @Override + public void init() throws Exception { + super.init(); + // initialize oracle.sql.BLOB class & constants + + // use the Connection object for using the exact same + // class loader that the Oracle driver was loaded with + Connection con = null; + try { + con = dataSource.getConnection(); + blobClass = con.getClass().getClassLoader().loadClass("oracle.sql.BLOB"); + durationSessionConstant = new Integer(blobClass.getField("DURATION_SESSION").getInt(null)); + modeReadWriteConstant = new Integer(blobClass.getField("MODE_READWRITE").getInt(null)); + } finally { + if (con != null) { + DbUtility.close(con, null, null); + } + } + } + + /** + * Wraps any input-stream parameters in temporary blobs and frees these again after the statement + * has been executed. 
+ * + * {@inheritDoc} + */ + @Override + protected PreparedStatement execute(PreparedStatement stmt, Object[] params) throws SQLException { + List tmpBlobs = new ArrayList(); + try { + for (int i = 0; params != null && i < params.length; i++) { + Object p = params[i]; + if (p instanceof StreamWrapper) { + StreamWrapper wrapper = (StreamWrapper) p; + Blob tmp = createTemporaryBlob(stmt.getConnection(), wrapper.getStream()); + tmpBlobs.add(tmp); + stmt.setBlob(i + 1, tmp); + } else if (p instanceof InputStream) { + Blob tmp = createTemporaryBlob(stmt.getConnection(), (InputStream) p); + tmpBlobs.add(tmp); + stmt.setBlob(i + 1, tmp); + } else { + stmt.setObject(i + 1, p); + } + } + stmt.execute(); + return stmt; + } catch (Exception e) { + throw new SQLException(e.getMessage()); + } finally { + for (Blob blob : tmpBlobs) { + try { + freeTemporaryBlob(blob); + } catch (Exception e) { + log.warn("Could not close temporary blob", e); + } + } + } + } + + /** + * Creates a temporary oracle.sql.BLOB instance via reflection and spools the contents of the specified + * stream. + */ + private Blob createTemporaryBlob(Connection con, InputStream in) throws Exception { + /* + * BLOB blob = BLOB.createTemporary(con, false, BLOB.DURATION_SESSION); + * blob.open(BLOB.MODE_READWRITE); OutputStream out = blob.getBinaryOutputStream(); ... 
out.flush(); + * out.close(); blob.close(); return blob; + */ + Method createTemporary = + blobClass.getMethod("createTemporary", new Class[]{Connection.class, Boolean.TYPE, Integer.TYPE}); + Object blob = + createTemporary.invoke(null, new Object[]{ConnectionFactory.unwrap(con), Boolean.FALSE, + durationSessionConstant}); + Method open = blobClass.getMethod("open", new Class[]{Integer.TYPE}); + open.invoke(blob, new Object[]{modeReadWriteConstant}); + Method getBinaryOutputStream = blobClass.getMethod("getBinaryOutputStream", new Class[0]); + OutputStream out = (OutputStream) getBinaryOutputStream.invoke(blob); + try { + IOUtils.copy(in, out); + } finally { + try { + out.flush(); + } catch (IOException ioe) { + } + out.close(); + } + Method close = blobClass.getMethod("close", new Class[0]); + close.invoke(blob); + return (Blob) blob; + } + + /** + * Frees a temporary oracle.sql.BLOB instance via reflection. + */ + private void freeTemporaryBlob(Blob blob) throws Exception { + // blob.freeTemporary(); + Method freeTemporary = blobClass.getMethod("freeTemporary", new Class[0]); + freeTemporary.invoke(blob); + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/OracleConnectionHelper.java (working copy) @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.SQLException; + +import javax.sql.DataSource; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The connection helper for Oracle databases of version 10.2 and later. + */ +public class OracleConnectionHelper extends ConnectionHelper { + + /** + * the default logger + */ + private static Logger log = LoggerFactory.getLogger(OracleConnectionHelper.class); + + /** + * @param dataSrc the {@code DataSource} on which this helper acts + * @param block whether to block on connection loss until the db is up again + */ + public OracleConnectionHelper(DataSource dataSrc, boolean block) { + super(dataSrc, true, block); + } + + /** + * Initializes the helper: checks for valid driver version. + * Subclasses that override this method should still call it! + * + * @throws Exception on error + */ + public void init() throws Exception { + // check driver version + Connection connection = dataSource.getConnection(); + try { + DatabaseMetaData metaData = connection.getMetaData(); + if (metaData.getDriverMajorVersion() < 10) { + // Oracle drivers prior to version 10 only support + // writing BLOBs up to 32k in size... 
+ log.warn("Unsupported driver version detected: " + + metaData.getDriverName() + + " v" + metaData.getDriverVersion()); + } + } catch (SQLException e) { + log.warn("Can not retrieve driver version", e); + } finally { + DbUtility.close(connection, null, null); + } + } + + /** + * Since Oracle only supports table names up to 30 characters in + * length illegal characters are simply replaced with "_" rather than + * escaping them with "_x0000_". + * + * {@inheritDoc} + */ + @Override + protected final void replaceCharacter(StringBuilder escaped, char c) { + escaped.append("_"); + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/PostgreSQLConnectionHelper.java (working copy) @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import javax.sql.DataSource; + + +/** + * The connection helper for PSQL databases. It has special fetch size handling. 
+ */ +public final class PostgreSQLConnectionHelper extends ConnectionHelper { + + /** + * @param dataSrc the {@code DataSource} on which this helper acts + * @param block whether to block on connection loss until the db is up again + */ + public PostgreSQLConnectionHelper(DataSource dataSrc, boolean block) { + super(dataSrc, false, block, 10000); + } + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/ResultSetWrapper.java (working copy) @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; + + +/** + * This is a dynamic proxy in order to support both Java 5 and 6. 
+ */ +public final class ResultSetWrapper implements InvocationHandler { + + private final Connection connection; + + private final Statement statement; + + private final ResultSet resultSet; + + /** + * Creates a new {@code ResultSet} proxy which closes the given {@code Connection} and + * {@code Statement} if it is closed. + * + * @param con the associated {@code Connection} + * @param stmt the associated {@code Statement} + * @param rs the {@code ResultSet} which backs the proxy + * @return a {@code ResultSet} proxy + */ + public static final ResultSet newInstance(Connection con, Statement stmt, ResultSet rs) { + ResultSetWrapper proxy = new ResultSetWrapper(con, stmt, rs); + return (ResultSet) Proxy.newProxyInstance(rs.getClass().getClassLoader(), + new Class[]{ResultSet.class}, proxy); + } + + private ResultSetWrapper(Connection con, Statement stmt, ResultSet rs) { + connection = con; + statement = stmt; + resultSet = rs; + } + + /** + * {@inheritDoc} + */ + public Object invoke(Object proxy, Method m, Object[] args) throws Throwable { + if ("close".equals(m.getName())) { + DbUtility.close(connection, statement, resultSet); + return null; + } else { + return m.invoke(resultSet, args); + } + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/core/util/db/StreamWrapper.java (working copy) @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.core.util.db; + +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.sql.SQLException; + +import org.apache.jackrabbit.core.data.db.TempFileInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class StreamWrapper { + + static Logger log = LoggerFactory.getLogger(StreamWrapper.class); + + private InputStream stream; + private final long size; + + /** + * Creates a wrapper for the given InputStream that can + * safely be passed as a parameter to the {@link ConnectionHelper#exec(String, Object...)}, + * {@link ConnectionHelper#exec(String, Object[], boolean, int)} and + * {@link ConnectionHelper#update(String, Object[])} methods. + * If the wrapped Stream is a {@link TempFileInputStream} it will be wrapped again by a {@link BufferedInputStream}. 
+ * + * @param in the InputStream to wrap + * @param size the size of the input stream + */ + public StreamWrapper(InputStream in, long size) { + this.stream = in; + this.size = size; + } + + public InputStream getStream() { + if (stream instanceof TempFileInputStream) { + return new BufferedInputStream(stream); + } + return stream; + } + + public long getSize() { + return size; + } + + /** + * Cleans up the internal Resources + */ + public void cleanupResources() { + if (stream instanceof TempFileInputStream) { + try { + stream.close(); + ((TempFileInputStream) stream).deleteFile(); + } catch (IOException e) { + log.warn("Unable to cleanup the TempFileInputStream"); + } + } + } + + /** + * Resets the internal InputStream that it could be re-read.
+ * It is used by the {@link RetryManager} if a {@link SQLException} has occurred.
+ * At the moment only a {@link TempFileInputStream} can be reseted. + * + * @return returns true if it was able to reset the Stream + */ + public boolean resetStream() { + if (stream instanceof TempFileInputStream) { + try { + TempFileInputStream tempFileInputStream = (TempFileInputStream) stream; + // Close it if it is not already closed ... + tempFileInputStream.close(); + stream = new TempFileInputStream(tempFileInputStream.getFile(), true); + return true; + } catch (Exception e) { + log.warn("Failed to create a new TempFileInputStream", e); + } + } + return false; + } +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/InternalXAResource.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/InternalXAResource.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/InternalXAResource.java (working copy) @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.data.core; + + + +/** + * Interface implemented by resources that provide XA functionality. 
+ */ +public interface InternalXAResource { + + /** + * Associate this resource with a transaction. All further operations on + * the object should be interpreted as part of this transaction and changes + * recorded in some attribute of the transaction context. + * @param tx transaction context, if null disassociate + */ + void associate(TransactionContext tx); + + /** + * Invoked before one of the {@link #prepare}, {@link #commit} or + * {@link #rollback} method is called. + * @param tx transaction context + */ + void beforeOperation(TransactionContext tx); + + /** + * Prepare transaction. The transaction is identified by a transaction + * context. + * @param tx transaction context + * @throws TransactionException if an error occurs + */ + void prepare(TransactionContext tx) throws TransactionException; + + /** + * Commit transaction. The transaction is identified by a transaction + * context. If the method throws, other resources get their changes + * rolled back. + * @param tx transaction context + * @throws TransactionException if an error occurs + */ + void commit(TransactionContext tx) throws TransactionException; + + /** + * Rollback transaction. The transaction is identified by a transaction + * context. + * @param tx transaction context. + */ + void rollback(TransactionContext tx) throws TransactionException; + + /** + * Invoked after one of the {@link #prepare}, {@link #commit} or + * {@link #rollback} method has been called. 
+ * @param tx transaction context + */ + void afterOperation(TransactionContext tx); + +} Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionContext.java =================================================================== --- jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionContext.java (revision 0) +++ jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionContext.java (working copy) @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.data.core; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import javax.transaction.xa.XAException; +import javax.transaction.xa.Xid; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Represents the transaction on behalf of the component that wants to + * explicitly demarcate transaction boundaries. After having been prepared, + * schedules a task that rolls back the transaction if some time passes without + * any further action. This will guarantee that global objects locked by one + * of the resources' {@link InternalXAResource#prepare} method, are eventually + * unlocked. 
+ */
+public class TransactionContext {
+
+    /**
+     * Logger instance.
+     */
+    private static final Logger log = LoggerFactory.getLogger(TransactionContext.class);
+
+    private static final int STATUS_PREPARING = 1;
+    private static final int STATUS_PREPARED = 2;
+    private static final int STATUS_COMMITTING = 3;
+    private static final int STATUS_COMMITTED = 4;
+    private static final int STATUS_ROLLING_BACK = 5;
+    private static final int STATUS_ROLLED_BACK = 6;
+
+    /**
+     * The per thread associated Xid
+     */
+    private static final ThreadLocal CURRENT_XID = new ThreadLocal();
+
+    /**
+     * Transactional resources.
+     */
+    private final InternalXAResource[] resources;
+
+    /**
+     * The Xid
+     */
+    private final Xid xid;
+
+    /**
+     * Transaction attributes.
+     */
+    private final Map attributes = new HashMap();
+
+    /**
+     * Status.
+     */
+    private int status;
+
+    /**
+     * Flag indicating whether the association is currently suspended.
+     */
+    private boolean suspended;
+
+    /**
+     * Create a new instance of this class.
+     *
+     * @param xid associated xid
+     * @param resources transactional resources
+     */
+    public TransactionContext(Xid xid, InternalXAResource[] resources) {
+        this.xid = xid;
+        this.resources = resources;
+    }
+
+    /**
+     * Set an attribute on this transaction. If the value specified is
+     * null, it is semantically equivalent to
+     * {@link #removeAttribute}.
+     *
+     * @param name attribute name
+     * @param value attribute value
+     */
+    public void setAttribute(String name, Object value) {
+        if (value == null) {
+            removeAttribute(name); return; // null means remove: must not fall through to put()
+        }
+        attributes.put(name, value);
+    }
+
+    /**
+     * Return an attribute value on this transaction.
+     *
+     * @param name attribute name
+     * @return attribute value, null if no attribute with that
+     *         name exists
+     */
+    public Object getAttribute(String name) {
+        return attributes.get(name);
+    }
+
+    /**
+     * Remove an attribute on this transaction.
+     *
+     * @param name attribute name
+     */
+    public void removeAttribute(String name) {
+        attributes.remove(name);
+    }
+
+    /**
+     * Prepare the transaction identified by this context. Prepares changes on
+     * all resources. If some resource reports an error on prepare,
+     * automatically rollback changes on all other resources. Throw exception
+     * at the end if errors were found.
+     *
+     * @throws XAException if an error occurs
+     */
+    public synchronized void prepare() throws XAException {
+        bindCurrentXid();
+        status = STATUS_PREPARING;
+        beforeOperation();
+
+        TransactionException txe = null;
+        for (int i = 0; i < resources.length; i++) {
+            try {
+                resources[i].prepare(this);
+            } catch (TransactionException e) {
+                txe = e;
+                break;
+            } catch (Exception e) {
+                txe = new TransactionException("Error while preparing resource " + resources[i], e);
+                break;
+            }
+        }
+
+        afterOperation();
+        status = STATUS_PREPARED;
+
+        if (txe != null) {
+            // force immediate rollback on error.
+            try {
+                rollback();
+            } catch (XAException e) {
+                /* ignore */
+            }
+            XAException e = new XAException(XAException.XA_RBOTHER);
+            e.initCause(txe);
+            throw e;
+        }
+    }
+
+    /**
+     * Commit the transaction identified by this context. Commits changes on
+     * all resources. If some resource reports an error on commit,
+     * automatically rollback changes on all other resources. Throw
+     * exception at the end if some commit failed.
+ * + * @throws XAException if an error occurs + */ + public synchronized void commit() throws XAException { + if (status == STATUS_ROLLED_BACK) { + throw new XAException(XAException.XA_HEURRB); + } + + boolean heuristicCommit = false; + bindCurrentXid(); + status = STATUS_COMMITTING; + beforeOperation(); + + TransactionException txe = null; + for (int i = 0; i < resources.length; i++) { + InternalXAResource resource = resources[i]; + if (txe != null) { + try { + resource.rollback(this); + } catch (Exception e) { + log.warn("Unable to rollback changes on " + resource, e); + } + } else { + try { + resource.commit(this); + heuristicCommit = true; + } catch (TransactionException e) { + txe = e; + } catch (Exception e) { + txe = new TransactionException("Error while committing resource " + resource, e); + } + } + } + afterOperation(); + status = STATUS_COMMITTED; + + cleanCurrentXid(); + + if (txe != null) { + XAException e = null; + if (heuristicCommit) { + e = new XAException(XAException.XA_HEURMIX); + } else { + e = new XAException(XAException.XA_HEURRB); + } + e.initCause(txe); + throw e; + } + } + + /** + * Rollback the transaction identified by this context. Rolls back changes + * on all resources. Throws exception at the end if errors were found. 
+ * @throws XAException if an error occurs + */ + public synchronized void rollback() throws XAException { + if (status == STATUS_ROLLED_BACK) { + throw new XAException(XAException.XA_RBOTHER); + } + bindCurrentXid(); + status = STATUS_ROLLING_BACK; + beforeOperation(); + + int errors = 0; + for (int i = 0; i < resources.length; i++) { + InternalXAResource resource = resources[i]; + try { + resource.rollback(this); + } catch (Exception e) { + log.warn("Unable to rollback changes on " + resource, e); + errors++; + } + } + afterOperation(); + status = STATUS_ROLLED_BACK; + + cleanCurrentXid(); + + if (errors != 0) { + throw new XAException(XAException.XA_RBOTHER); + } + } + + /** + * Invoke all of the registered resources' {@link InternalXAResource#beforeOperation} + * methods. + */ + private void beforeOperation() { + for (int i = 0; i < resources.length; i++) { + resources[i].beforeOperation(this); + } + } + + /** + * Invoke all of the registered resources' {@link InternalXAResource#afterOperation} + * methods. + */ + private void afterOperation() { + for (int i = 0; i < resources.length; i++) { + resources[i].afterOperation(this); + } + } + + /** + * Return a flag indicating whether the association is suspended. + * + * @return true if the association is suspended; + * false otherwise + */ + public boolean isSuspended() { + return suspended; + } + + /** + * Set a flag indicating whether the association is suspended. + * + * @param suspended flag whether that the association is suspended. + */ + public void setSuspended(boolean suspended) { + this.suspended = suspended; + } + + /** + * Helper Method to bind the {@link Xid} associated with this {@link TransactionContext} + * to the {@link #CURRENT_XID} ThreadLocal. + */ + private void bindCurrentXid() { + CURRENT_XID.set(xid); + } + + /** + * Helper Method to clean the {@link Xid} associated with this {@link TransactionContext} + * from the {@link #CURRENT_XID} ThreadLocal. 
+     */
+    private void cleanCurrentXid() {
+        CURRENT_XID.set(null);
+    }
+
+    /**
+     * Returns the {@link Xid} bound to the {@link #CURRENT_XID} ThreadLocal
+     * @return current Xid or null
+     */
+    private static Xid getCurrentXid() {
+        return CURRENT_XID.get();
+    }
+
+    /**
+     * Returns the current thread identifier. The identifier is either the
+     * current thread instance or the global transaction identifier wrapped
+     * in a {@link XidWrapper}, when running under a transaction.
+     *
+     * @return current thread identifier
+     */
+    public static Object getCurrentThreadId() {
+        Xid xid = TransactionContext.getCurrentXid();
+        if (xid != null) {
+            return new XidWrapper(xid.getGlobalTransactionId());
+        } else {
+            return Thread.currentThread();
+        }
+    }
+
+    /**
+     * Compares the given thread identifiers for equality.
+     *
+     * @see #getCurrentThreadId()
+     */
+    public static boolean isSameThreadId(Object a, Object b) {
+        if (a == b) {
+            return true;
+        } else if (a != null) {
+            return a.equals(b);
+        } else {
+            return false;
+        }
+    }
+
+    /**
+     * Wrapper around a global transaction id (byte[])
+     * that handles hashCode and equals in a proper way.
+     */
+    private static class XidWrapper {
+        private final byte[] gtid;
+
+        public XidWrapper(byte[] gtid) {
+            this.gtid = gtid;
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (!(other instanceof XidWrapper)) {
+                return false;
+            }
+            return Arrays.equals(gtid, ((XidWrapper) other).gtid);
+        }
+
+        @Override
+        public int hashCode() {
+            return Arrays.hashCode(gtid);
+        }
+    }
+
+}
Index: jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionException.java
===================================================================
--- jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionException.java	(revision 0)
+++ jackrabbit-data/src/main/java/org/apache/jackrabbit/data/core/TransactionException.java	(working copy)
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.data.core;
+
+/**
+ * TransactionException is thrown when some operation inside the transaction
+ * fails.
+ */
+public class TransactionException extends Exception {
+
+    /**
+     * Creates an instance of this class. Takes a detail message as parameter.
+ * + * @param message message + */ + public TransactionException(String message) { + super(message); + } + + /** + * Creates an instance of this class. Takes a message and a root throwable + * as parameter. + * + * @param message message + * @param rootCause root throwable + */ + public TransactionException(String message, Throwable rootCause) { + super(message, rootCause); + } +} Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/azure.properties (working copy) @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +driver=com.microsoft.sqlserver.jdbc.SQLServerDriver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/db2.properties (working copy) @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +driver=COM.ibm.db2.jdbc.net.DB2Driver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY NOT NULL, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB(1000M)) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/derby.properties (working copy) @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Tested with Apache Derby 10.3.1.4 on Windows XP (2007-12-11) +driver=org.apache.derby.jdbc.EmbeddedDriver \ No newline at end of file Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/h2.properties (working copy) @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Tested with H2 1.0.63 on Windows XP (2007-12-11) +driver=org.h2.Driver +storeStream=-1 \ No newline at end of file Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/ingres.properties (working copy) @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +driver=com.ingres.jdbc.IngresDriver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY NOT NULL, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA LONG BYTE) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mssql.properties (working copy) @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +driver=com.microsoft.sqlserver.jdbc.SQLServerDriver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/mysql.properties (working copy) @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Tested with MySQL 5.0.27-community-nt on Windows XP (2007-12-11) +# currently, the objects must fit in memory +driver=com.mysql.jdbc.Driver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BLOB(2147483647)) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/oracle.properties (working copy) @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Tested with Oracle Database 10g Release 10.2.0.1.0 on Windows XP (2008-04-29) +driver=oracle.jdbc.OracleDriver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH NUMBER, LAST_MODIFIED NUMBER, DATA BLOB) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/postgresql.properties (working copy) @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Tested with PostgreSQL 8.2.4 on Windows XP (2007-12-11) +# currently, the objects must fit in memory +driver=org.postgresql.Driver +table=datastore +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA BYTEA) Index: jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties =================================================================== --- jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties (revision 0) +++ jackrabbit-data/src/main/resources/org/apache/jackrabbit/core/data/db/sqlserver.properties (working copy) @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Tested with Microsoft SQL Server 2005 4 on Windows XP (2007-12-11) +driver=com.microsoft.sqlserver.jdbc.SQLServerDriver +createTable=CREATE TABLE ${tablePrefix}${table}(ID VARCHAR(255) PRIMARY KEY, LENGTH BIGINT, LAST_MODIFIED BIGINT, DATA IMAGE) Index: pom.xml =================================================================== --- pom.xml (revision 1564627) +++ pom.xml (working copy) @@ -40,6 +40,7 @@ jackrabbit-api jackrabbit-jcr-commons jackrabbit-jcr-tests + jackrabbit-data jackrabbit-core jackrabbit-webdav jackrabbit-jcr-server