Index: jackrabbit-aws-ext/pom.xml
===================================================================
--- jackrabbit-aws-ext/pom.xml	(revision 0)
+++ jackrabbit-aws-ext/pom.xml	(working copy)
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.jackrabbit</groupId>
+    <artifactId>jackrabbit-parent</artifactId>
+    <version>2.8-SNAPSHOT</version>
+    <relativePath>../jackrabbit-parent/pom.xml</relativePath>
+  </parent>
+
+  <artifactId>jackrabbit-aws-ext</artifactId>
+  <name>Jackrabbit AWS Extension</name>
+  <description>Jackrabbit extension to Amazon Web Services</description>
+
+  <dependencies>
+    <dependency>
+      <groupId>javax.jcr</groupId>
+      <artifactId>jcr</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jackrabbit</groupId>
+      <artifactId>jackrabbit-jcr-commons</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk</artifactId>
+      <version>1.5.6</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jackrabbit</groupId>
+      <artifactId>jackrabbit-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <version>1.7.5</version>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <version>1.7.5</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.jackrabbit</groupId>
+      <artifactId>jackrabbit-core</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <includes>
+            <include>**/aws/**/TestAll.java</include>
+          </includes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
Index: jackrabbit-aws-ext/README.txt
===================================================================
--- jackrabbit-aws-ext/README.txt	(revision 0)
+++ jackrabbit-aws-ext/README.txt	(working copy)
@@ -0,0 +1,28 @@
+====================================================
+Welcome to Jackrabbit Amazon WebServices Extension
+====================================================
+
+This is the Amazon Web Services Extension component of the Apache Jackrabbit
+project. This component contains the S3 data store, which stores binaries on
+Amazon S3 (http://aws.amazon.com/s3).
+
+====================================================
+Build Instructions
+====================================================
+To build the latest SNAPSHOT versions of all the components
+included here, run the following command with Maven 3:
+
+    mvn clean install
+
+To run the test cases that store data in an S3 bucket, pass the AWS
+configuration file via a system property, e.g.:
+
+    mvn clean install -DargLine="-Dconfig=/opt/cq/aws.properties"
+
+A sample aws.properties file is located at src/test/resources/aws.properties.
+
+====================================================
+Configuration Instructions
+====================================================
+The S3 data store is configured through the aws.properties file described
+above.
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/Backend.java	(working copy)
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataStoreException;
+
+/**
+ * This interface defines a backend that can be plugged into
+ * {@link CachingDataStore}.
+ */
+public interface Backend {
+
+    /**
+     * Initializes the backend with the given configuration.
+     *
+     * @param store {@link CachingDataStore}
+     * @param homeDir path of the repository home directory.
+     * @param config path of the configuration property file.
+     * @throws DataStoreException
+     */
+    void init(CachingDataStore store, String homeDir, String config)
+            throws DataStoreException;
+
+    /**
+     * Returns an input stream of the record identified by identifier.
+     *
+     * @param identifier identifier of the record.
+     * @return input stream of the record.
+     * @throws DataStoreException if the record is not found or on any error.
+     */
+    InputStream read(DataIdentifier identifier) throws DataStoreException;
+
+    /**
+     * Returns the length of the record identified by identifier.
+     *
+     * @param identifier identifier of the record.
+     * @return length of the record.
+     * @throws DataStoreException if the record is not found or on any error.
+     */
+    long getLength(DataIdentifier identifier) throws DataStoreException;
+
+    /**
+     * Returns the last modified time of the record identified by identifier.
+     *
+     * @param identifier identifier of the record.
+     * @return lastModified of the record.
+     * @throws DataStoreException if the record is not found or on any error.
+     */
+    long getLastModified(DataIdentifier identifier) throws DataStoreException;
+
+    /**
+     * Stores the file in the backend with the identifier used as the key. If
+     * the key already exists, the timestamp of the key is updated.
+     *
+     * @param identifier key of the file
+     * @param file file to be stored in the backend.
+     * @throws DataStoreException on any error.
+     */
+    void write(DataIdentifier identifier, File file) throws DataStoreException;
+
+    /**
+     * Returns identifiers of all records that exist in the backend.
+     *
+     * @return iterator over all identifiers
+     * @throws DataStoreException
+     */
+    Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException;
+
+    /**
+     * Updates the timestamp of the record identified by identifier if
+     * minModifiedDate is greater than the record's lastModified, otherwise
+     * this is a no-op.
+     *
+     * @throws DataStoreException if the record is not found.
+     */
+    void touch(DataIdentifier identifier, long minModifiedDate)
+            throws DataStoreException;
+
+    /**
+     * Checks whether the record exists in the backend.
+     *
+     * @param identifier identifier to be checked.
+     * @return true if the record exists, false otherwise.
+     * @throws DataStoreException
+     */
+    boolean exists(DataIdentifier identifier) throws DataStoreException;
+
+    /**
+     * Closes the backend and releases resources such as database connections,
+     * if any.
+     *
+     * @throws DataStoreException
+     */
+    void close() throws DataStoreException;
+
+    /**
+     * Deletes all records that are older than timestamp.
+     *
+     * @param timestamp
+     * @return list of identifiers that were deleted.
+     * @throws DataStoreException
+     */
+    List<DataIdentifier> deleteAllOlderThan(long timestamp)
+            throws DataStoreException;
+
+    /**
+     * Deletes the record identified by identifier. No-op if the identifier is
+     * not found.
+     * @param identifier
+     * @throws DataStoreException
+     */
+    void deleteRecord(DataIdentifier identifier) throws DataStoreException;
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataRecord.java	(working copy)
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.InputStream;
+
+import org.apache.jackrabbit.core.data.AbstractDataRecord;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataStoreException;
+
+/**
+ * A data record that holds a reference to the {@link CachingDataStore}. This
+ * class does not cache any attributes; attributes are fetched on demand from
+ * the {@link CachingDataStore}.
+ */
+public class CachingDataRecord extends AbstractDataRecord {
+
+    private final CachingDataStore store;
+
+    public CachingDataRecord(CachingDataStore store, DataIdentifier identifier) {
+        super(store, identifier);
+        this.store = store;
+    }
+
+    public long getLastModified() {
+        try {
+            return store.getLastModified(getIdentifier());
+        } catch (DataStoreException dse) {
+            return 0;
+        }
+    }
+
+    public long getLength() throws DataStoreException {
+        return store.getLength(getIdentifier());
+    }
+
+    public InputStream getStream() throws DataStoreException {
+        return store.getStream(getIdentifier());
+    }
+
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/CachingDataStore.java	(working copy)
@@ -0,0 +1,577 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.lang.ref.WeakReference;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import javax.jcr.RepositoryException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.aws.ext.LocalCache;
+import org.apache.jackrabbit.core.data.AbstractDataStore;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.data.MultiDataStoreAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A caching data store that consists of a {@link LocalCache} and a
+ * {@link Backend}. The {@link Backend} is the single source of truth. All
+ * methods first try to fetch information from the {@link LocalCache}. If a
+ * record is not available in the {@link LocalCache}, it is fetched from the
+ * {@link Backend} and saved to the {@link LocalCache} for further access.
+ * This class is also designed to work without the {@link LocalCache}, in
+ * which case all information is fetched from the {@link Backend}. To disable
+ * the {@link LocalCache}, set {@link #setCacheSize(long)} to 0.
+ *
+ * Configuration:
+ *
+ * <pre>
+ * <DataStore class="org.apache.jackrabbit.aws.ext.ds.CachingDataStore">
+ * 
+ *     <param name="{@link #setPath(String) path}" value="/data/datastore"/>
+ *     <param name="{@link #setConfig(String) config}" value="${rep.home}/backend.properties"/>
+ *     <param name="{@link #setCacheSize(long) cacheSize}" value="68719476736"/>
+ *     <param name="{@link #setSecret(String) secret}" value="123456"/>
+ *     <param name="{@link #setCachePurgeTrigFactor(double)}" value="0.95d"/>
+ *     <param name="{@link #setCachePurgeResizeFactor(double) cacheSize}" value="0.85d"/>
+ *     <param name="{@link #setMinRecordLength(int) minRecordLength}" value="1024"/>
+ * </DataStore>
+ * </pre>
+ */
+public abstract class CachingDataStore extends AbstractDataStore implements
+        MultiDataStoreAware {
+
+    /**
+     * Logger instance.
+     */
+    private static final Logger LOG = LoggerFactory.getLogger(CachingDataStore.class);
+
+    /**
+     * The digest algorithm used to uniquely identify records.
+     */
+    private static final String DIGEST = "SHA-1";
+
+    private static final String DS_STORE = ".DS_Store";
+
+    /**
+     * The minimum size of an object that should be stored in this data store.
+     */
+    private int minRecordLength = 16 * 1024;
+
+    private String path;
+
+    private File directory;
+
+    private File tmpDir;
+
+    private String secret;
+
+    /**
+     * The optional backend configuration.
+     */
+    private String config;
+
+    /**
+     * Name of the directory used for temporary files. Must be at least 3
+     * characters.
+     */
+    private static final String TMP = "tmp";
+
+    /**
+     * The minimum modified date. If a file is accessed (read or write) with a
+     * modified date older than this value, the modified date is updated to the
+     * current time.
+     */
+    private long minModifiedDate;
+
+    /**
+     * Cache purge trigger factor. The cache goes into auto-purge mode if its
+     * current size is greater than cachePurgeTrigFactor * cacheSize.
+     */
+    private double cachePurgeTrigFactor = 0.95d;
+
+    /**
+     * Cache resize factor. After auto-purge, the cache size will be just
+     * below cachePurgeResizeFactor * cacheSize.
+     */
+    private double cachePurgeResizeFactor = 0.85d;
+
+    /**
+     * All data identifiers that are currently in use are kept in this map
+     * until they are garbage collected.
+     */
+    protected Map<DataIdentifier, WeakReference<DataIdentifier>> inUse =
+            Collections.synchronizedMap(new WeakHashMap<DataIdentifier, WeakReference<DataIdentifier>>());
+
+    /**
+     * The number of bytes in the cache. The default value is 64 GB.
+     */
+    private long cacheSize = 64L * 1024 * 1024 * 1024;
+
+    protected Backend backend;
+
+    /**
+     * The local file system cache.
+     */
+    private LocalCache cache;
+
+    abstract Backend createBackend();
+
+    abstract String getMarkerFile();
+
+    /**
+     * Initializes the data store. If the path is not set, <repository
+     * home>/repository/datastore is used. This directory is automatically
+     * created if it does not yet exist. During the first initialization, all
+     * files are uploaded from the local data store to the backend, and the
+     * local data store then acts as a local cache.
+     */
+    public void init(String homeDir) throws RepositoryException {
+        if (path == null) {
+            path = homeDir + "/repository/datastore";
+        }
+        directory = new File(path);
+        try {
+            mkdirs(directory);
+        } catch (IOException e) {
+            throw new DataStoreException("Could not create directory "
+                + directory.getAbsolutePath(), e);
+        }
+        tmpDir = new File(homeDir, "/repository/s3tmp");
+        try {
+            if (!mkdirs(tmpDir)) {
+                FileUtils.cleanDirectory(tmpDir);
+                LOG.info("tmp = " + tmpDir.getPath() + " cleaned");
+            }
+        } catch (IOException e) {
+            throw new DataStoreException("Could not create directory "
+                + tmpDir.getAbsolutePath(), e);
+        }
+        LOG.info("cachePurgeTrigFactor = " + cachePurgeTrigFactor
+            + ", cachePurgeResizeFactor = " + cachePurgeResizeFactor);
+        backend = createBackend();
+        backend.init(this, path, config);
+        String markerFileName = getMarkerFile();
+        if (markerFileName != null) {
+            // create marker file in homeDir to avoid deletion in cache cleanup.
+            File markerFile = new File(homeDir, markerFileName);
+            if (!markerFile.exists()) {
+                LOG.info("load files from local cache");
+                loadFilesFromCache();
+                try {
+                    markerFile.createNewFile();
+                } catch (IOException e) {
+                    throw new DataStoreException(
+                        "Could not create marker file "
+                            + markerFile.getAbsolutePath(), e);
+                }
+            } else {
+                LOG.info("marker file = " + markerFile.getAbsolutePath()
+                    + " exists");
+            }
+        }
+        cache = new LocalCache(path, tmpDir.getAbsolutePath(), cacheSize,
+            cachePurgeTrigFactor, cachePurgeResizeFactor);
+    }
+
+    /**
+     * Creates a new data record in the {@link Backend}. The stream is first
+     * consumed: the contents are saved in a temporary file and the SHA-1
+     * message digest of the stream is calculated. If a record with the same
+     * SHA-1 digest (and length) already exists, it is returned. Otherwise a
+     * new record is created in the {@link Backend} and the temporary file is
+     * moved into place in the {@link LocalCache}.
+     * 
+     * @param input binary stream
+     * @return {@link CachingDataRecord}
+     * @throws DataStoreException if the record could not be created.
+     */
+    public DataRecord addRecord(InputStream input) throws DataStoreException {
+        File temporary = null;
+        try {
+            temporary = newTemporaryFile();
+            DataIdentifier tempId = new DataIdentifier(temporary.getName());
+            usesIdentifier(tempId);
+            // Copy the stream to the temporary file and calculate the
+            // stream length and the message digest of the stream
+            long length = 0;
+            MessageDigest digest = MessageDigest.getInstance(DIGEST);
+            OutputStream output = new DigestOutputStream(new FileOutputStream(
+                temporary), digest);
+            try {
+                length = IOUtils.copyLarge(input, output);
+            } finally {
+                output.close();
+            }
+            DataIdentifier identifier = new DataIdentifier(
+                encodeHexString(digest.digest()));
+            synchronized (this) {
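+                // Synchronized so that the backend write and the cache store
+                // happen atomically with respect to concurrent deletes.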
+                usesIdentifier(identifier);
+                backend.write(identifier, temporary);
+                String fileName = getFileName(identifier);
+                cache.store(fileName, temporary);
+            }
+            // this will also make sure that
+            // tempId is not garbage collected until here
+            inUse.remove(tempId);
+            return new CachingDataRecord(this, identifier);
+        } catch (NoSuchAlgorithmException e) {
+            throw new DataStoreException(DIGEST + " not available", e);
+        } catch (IOException e) {
+            throw new DataStoreException("Could not add record", e);
+        } finally {
+            if (temporary != null) {
+                // try to delete - but it's not a big deal if we can't
+                temporary.delete();
+            }
+        }
+    }
+
+    /**
+     * Gets the data record for the given identifier, or null if the record
+     * does not exist in the {@link Backend}.
+     * 
+     * @param identifier identifier of record.
+     * @return the {@link CachingDataRecord} or null.
+     */
+    public DataRecord getRecordIfStored(DataIdentifier identifier)
+            throws DataStoreException {
+        synchronized (this) {
+            usesIdentifier(identifier);
+            if (!backend.exists(identifier)) {
+                return null;
+            }
+            backend.touch(identifier, minModifiedDate);
+            return new CachingDataRecord(this, identifier);
+        }
+    }
+
+    public void updateModifiedDateOnAccess(long before) {
+        LOG.info("minModifiedDate set to: " + before);
+        minModifiedDate = before;
+    }
+    /**
+     * Retrieves all identifiers from {@link Backend}.
+     */
+    public Iterator<DataIdentifier> getAllIdentifiers()
+            throws DataStoreException {
+        return backend.getAllIdentifiers();
+    }
+
+    /**
+     * This method deletes record from {@link Backend} and then from
+     * {@link LocalCache}
+     */
+    public void deleteRecord(DataIdentifier identifier)
+            throws DataStoreException {
+        String fileName = getFileName(identifier);
+        synchronized (this) {
+            backend.deleteRecord(identifier);
+            cache.delete(fileName);
+        }
+    }
+
+    public synchronized int deleteAllOlderThan(long min)
+            throws DataStoreException {
+        List<DataIdentifier> diList = backend.deleteAllOlderThan(min);
+        // remove entries from local cache
+        for (DataIdentifier identifier : diList) {
+            cache.delete(getFileName(identifier));
+        }
+        return diList.size();
+    }
+
+    /**
+     * Get stream of record from {@link LocalCache}. If record is not available
+     * in {@link LocalCache}, this method fetches record from {@link Backend}
+     * and stores it to {@link LocalCache}. Stream is then returned from cached
+     * record.
+     */
+    InputStream getStream(DataIdentifier identifier) throws DataStoreException {
+        InputStream in = null;
+        try {
+            String fileName = getFileName(identifier);
+            InputStream cached = cache.getIfStored(fileName);
+            if (cached != null) {
+                return cached;
+            }
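+            // Not in the local cache: fetch the record from the backend and
+            // store it in the cache; the returned stream reads from the
+            // cached copy, and the backend stream is closed in the finally
+            // block.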
+            in = backend.read(identifier);
+            return cache.store(fileName, in);
+        } catch (IOException e) {
+            throw new DataStoreException("IO Exception: " + identifier, e);
+        } finally {
+            IOUtils.closeQuietly(in);
+        }
+    }
+
+    /**
+     * Returns the lastModified of the record from the {@link Backend}, which
+     * is the single source of truth.
+     */
+    long getLastModified(DataIdentifier identifier) throws DataStoreException {
+        LOG.info("accessed lastModified");
+        return backend.getLastModified(identifier);
+    }
+
+    /**
+     * Return the length of record from {@link LocalCache} if available,
+     * otherwise retrieve it from {@link Backend}.
+     */
+    long getLength(DataIdentifier identifier) throws DataStoreException {
+        String fileName = getFileName(identifier);
+        Long length = cache.getFileLength(fileName);
+        if (length != null) {
+            return length.longValue();
+        }
+        return backend.getLength(identifier);
+    }
+
+    @Override
+    protected byte[] getOrCreateReferenceKey() throws DataStoreException {
+        try {
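+            // Note: the configured secret is required here; if it has not
+            // been set, this fails with a NullPointerException.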
+            return secret.getBytes("UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            throw new DataStoreException(e);
+        }
+    }
+
+    /**
+     * Returns a unique temporary file to be used for creating a new data
+     * record.
+     */
+    private File newTemporaryFile() throws IOException {
+        return File.createTempFile(TMP, null, tmpDir);
+    }
+
+    /**
+     * Uploads files from the local data store to the {@link Backend}.
+     */
+    private void loadFilesFromCache() throws RepositoryException {
+        ArrayList<File> files = new ArrayList<File>();
+        listRecursive(files, directory);
+        long totalSize = 0;
+        for (File f : files) {
+            totalSize += f.length();
+        }
+        long currentSize = 0;
+        long time = System.currentTimeMillis();
+        for (File f : files) {
+            long now = System.currentTimeMillis();
+            if (now > time + 5000) {
+                LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
+                time = now;
+            }
+            currentSize += f.length();
+            String name = f.getName();
+            LOG.debug("upload file = " + name);
+            if (!name.startsWith(TMP) && !name.endsWith(DS_STORE)
+                && f.length() > 0) {
+                loadFileToBackEnd(f);
+            }
+        }
+        LOG.info("Uploaded {" + currentSize + "}/{" + totalSize + "}");
+    }
+
+    /**
+     * Traverse recursively and populate list with files.
+     */
+    private void listRecursive(List<File> list, File file) {
+        File[] files = file.listFiles();
+        if (files != null) {
+            for (File f : files) {
+                if (f.isDirectory()) {
+                    listRecursive(list, f);
+                } else {
+                    list.add(f);
+                }
+            }
+        }
+    }
+    /**
+     * Uploads a file from the {@link LocalCache} to the {@link Backend}.
+     * @param f file to be uploaded.
+     * @throws DataStoreException
+     */
+    private void loadFileToBackEnd(File f) throws DataStoreException {
+        DataIdentifier identifier = new DataIdentifier(f.getName());
+        usesIdentifier(identifier);
+        backend.write(identifier, f);
+        LOG.debug(f.getName() + " uploaded.");
+
+    }
+
+    /**
+     * Derives the file name from the identifier: the first six characters
+     * form a three-level directory fan-out, e.g. "ab/cd/ef/abcdef...".
+     */
+    private String getFileName(DataIdentifier identifier) {
+        String name = identifier.toString();
+        name = name.substring(0, 2) + "/" + name.substring(2, 4) + "/"
+            + name.substring(4, 6) + "/" + name;
+        return name;
+    }
+
+    private void usesIdentifier(DataIdentifier identifier) {
+        inUse.put(identifier, new WeakReference<DataIdentifier>(identifier));
+    }
+
+    private boolean mkdirs(File dir) throws IOException {
+        if (dir.exists()) {
+            if (dir.isFile()) {
+                throw new IOException("Can not create a directory "
+                    + "because a file exists with the same name: "
+                    + dir.getAbsolutePath());
+            }
+            return false;
+        } else {
+            boolean created = dir.mkdirs();
+            if (!created) {
+                throw new IOException("Could not create directory: "
+                    + dir.getAbsolutePath());
+            }
+            return created;
+        }
+    }
+
+    public void clearInUse() {
+        inUse.clear();
+    }
+
+    public void close() throws DataStoreException {
+        cache.close();
+        backend.close();
+        cache = null;
+    }
+
+    /**
+     * Setter for configuration based secret
+     * 
+     * @param secret the secret used to sign reference binaries
+     */
+    public void setSecret(String secret) {
+        this.secret = secret;
+    }
+
+    /**
+     * Set the minimum object length.
+     * 
+     * @param minRecordLength the length
+     */
+    public void setMinRecordLength(int minRecordLength) {
+        this.minRecordLength = minRecordLength;
+    }
+
+    /**
+     * Returns the minimum object length.
+     */
+    public int getMinRecordLength() {
+        return minRecordLength;
+    }
+
+    /**
+     * Return path of configuration properties.
+     * 
+     * @return path of configuration properties.
+     */
+    public String getConfig() {
+        return config;
+    }
+
+    /**
+     * Set the configuration properties path.
+     * 
+     * @param config path of configuration properties.
+     */
+    public void setConfig(String config) {
+        this.config = config;
+    }
+    /**
+     * @return  size of {@link LocalCache}. 
+     */
+    public long getCacheSize() {
+        return cacheSize;
+    }
+    /**
+     * Set size of {@link LocalCache}.
+     * @param cacheSize size of {@link LocalCache}.  
+     */
+    public void setCacheSize(long cacheSize) {
+        this.cacheSize = cacheSize;
+    }
+    /**
+     * 
+     * @return path of {@link LocalCache}.
+     */
+    public String getPath() {
+        return path;
+    }
+    /**
+     * Set path of {@link LocalCache}.
+     * @param path of {@link LocalCache}.
+     */
+    public void setPath(String path) {
+        this.path = path;
+    }
+
+    /**
+     * @return Purge trigger factor of {@link LocalCache}.
+     */
+    public double getCachePurgeTrigFactor() {
+        return cachePurgeTrigFactor;
+    }
+
+    /**
+     * Set purge trigger factor of {@link LocalCache}.
+     * @param cachePurgeTrigFactor purge trigger factor.
+     */
+    public void setCachePurgeTrigFactor(double cachePurgeTrigFactor) {
+        this.cachePurgeTrigFactor = cachePurgeTrigFactor;
+    }
+
+    /**
+     * @return Purge resize factor of {@link LocalCache}.
+     */
+    public double getCachePurgeResizeFactor() {
+        return cachePurgeResizeFactor;
+    }
+
+    /**
+     * Set purge resize factor of {@link LocalCache}.
+     * @param cachePurgeResizeFactor purge resize factor.
+     */
+    public void setCachePurgeResizeFactor(double cachePurgeResizeFactor) {
+        this.cachePurgeResizeFactor = cachePurgeResizeFactor;
+    }
+
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3Backend.java	(working copy)
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.File;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.jackrabbit.aws.ext.S3Constants;
+import org.apache.jackrabbit.aws.ext.Utils;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.model.CopyObjectRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsResult;
+import com.amazonaws.services.s3.model.ObjectListing;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.PutObjectRequest;
+import com.amazonaws.services.s3.model.Region;
+import com.amazonaws.services.s3.model.S3Object;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+import com.amazonaws.services.s3.transfer.TransferManager;
+import com.amazonaws.services.s3.transfer.Upload;
+import com.amazonaws.services.s3.transfer.model.UploadResult;
+
+/**
+ * A data store backend that stores data on Amazon S3.
+ */
+public class S3Backend implements Backend {
+
+    /**
+     * Logger instance.
+     */
+    private static final Logger LOG = LoggerFactory.getLogger(S3Backend.class);
+
+    private AmazonS3Client s3service;
+
+    private String bucket;
+
+    private TransferManager tmx;
+
+    private CachingDataStore store;
+
+    private static final String KEY_PREFIX = "dataStore_";
+
+    /**
+     * The default AWS bucket region.
+     */
+    private static final String DEFAULT_AWS_BUCKET_REGION = "us-standard";
+
+    /**
+     * Constants used to build endpoints for the various AWS regions.
+     */
+    private static final String AWSDOTCOM = "amazonaws.com";
+
+    private static final String S3 = "s3";
+
+    private static final String DOT = ".";
+
+    private static final String DASH = "-";
+
+    /**
+     * Initializes the S3 backend. It creates the AmazonS3Client and
+     * TransferManager from aws.properties, and creates the S3 bucket if it
+     * does not already exist.
+     */
+    public void init(CachingDataStore store, String homeDir, String config)
+            throws DataStoreException {
+        if (config == null) {
+            config = Utils.DEFAULT_CONFIG_FILE;
+        }
+        try {
+            Properties prop = Utils.readConfig(config);
+            LOG.debug("init");
+            this.store = store;
+            s3service = Utils.openService(prop);
+            bucket = prop.getProperty(S3Constants.S3_BUCKET);
+            String region = prop.getProperty(S3Constants.S3_REGION);
+            String endpoint = null;
+            if (!s3service.doesBucketExist(bucket)) {
+
+                if (DEFAULT_AWS_BUCKET_REGION.equals(region)) {
+                    s3service.createBucket(bucket, Region.US_Standard);
+                    endpoint = S3 + DOT + AWSDOTCOM;
+                } else if (Region.EU_Ireland.toString().equals(region)) {
+                    s3service.createBucket(bucket, Region.EU_Ireland);
+                    endpoint = "s3-eu-west-1" + DOT + AWSDOTCOM;
+                } else {
+                    s3service.createBucket(bucket, region);
+                    endpoint = S3 + DASH + region + DOT + AWSDOTCOM;
+                }
+                LOG.info("Created bucket: " + bucket + " in " + region);
+            } else {
+                LOG.info("Using bucket: " + bucket);
+                if (DEFAULT_AWS_BUCKET_REGION.equals(region)) {
+                    endpoint = S3 + DOT + AWSDOTCOM;
+                } else if (Region.EU_Ireland.toString().equals(region)) {
+                    endpoint = "s3-eu-west-1" + DOT + AWSDOTCOM;
+                } else {
+                    endpoint = S3 + DASH + region + DOT + AWSDOTCOM;
+                }
+            }
+            /*
+             * Set the endpoint explicitly to avoid redirection latency. If
+             * the endpoint is not set, each request first goes to the US
+             * Standard region, which redirects it to the correct location.
+             */
+            s3service.setEndpoint(endpoint);
+            LOG.info("S3 service endpoint: " + endpoint);
+            tmx = new TransferManager(s3service, createDefaultExecutorService());
+            LOG.debug("  done");
+        } catch (Exception e) {
+            LOG.debug("  error ", e);
+            throw new DataStoreException("Could not initialize S3 from "
+                + config, e);
+        }
+    }
+
+    /**
+     * Uploads the file to Amazon S3. If the file size is greater than 5 MB,
+     * this method uploads the parts over concurrent connections.
+     */
+    public void write(DataIdentifier identifier, File file)
+            throws DataStoreException {
+        String key = getKeyName(identifier);
+        ObjectMetadata objectMetaData = null;
+        long start = System.currentTimeMillis();
+        LOG.debug("write {0} length {1}", identifier, file.length());
+        try {
+            // check if the same record already exists
+            try {
+                objectMetaData = s3service.getObjectMetadata(bucket, key);
+            } catch (AmazonServiceException ase) {
+                if (ase.getStatusCode() != 404) {
+                    throw ase;
+                }
+            }
+            if (objectMetaData != null) {
+                long l = objectMetaData.getContentLength();
+                if (l != file.length()) {
+                    throw new DataStoreException("Collision: " + key
+                        + " new length: " + file.length() + " old length: " + l);
+                }
+                LOG.debug(key + "   exists");
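+                // Copying the object onto itself is the S3 idiom for
+                // refreshing its lastModified timestamp without re-uploading
+                // the content.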
+                CopyObjectRequest copReq = new CopyObjectRequest(bucket, key,
+                    bucket, key);
+                copReq.setNewObjectMetadata(objectMetaData);
+                s3service.copyObject(copReq);
+                LOG.debug("lastModified of " + identifier.toString()
+                    + " updated successfully");
+                LOG.debug("   updated");
+            }
+        } catch (AmazonServiceException e) {
+            LOG.debug("   does not exist", e);
+            // not found - create it
+        }
+        if (objectMetaData == null) {
+            LOG.debug("   creating");
+            try {
+                // start multipart parallel upload using amazon sdk
+                Upload up = tmx.upload(new PutObjectRequest(bucket, key, file));
+                // wait for upload to finish
+                UploadResult uploadResult = up.waitForUploadResult();
+                LOG.debug("   done");
+            } catch (Exception e2) {
+                LOG.debug("   could not upload", e2);
+                throw new DataStoreException("Could not upload " + key, e2);
+            }
+        }
+        LOG.debug("    ms: {0}", System.currentTimeMillis() - start);
+
+    }
+
+    /**
+     * Check if record identified by identifier exists in Amazon S3.
+     */
+    public boolean exists(DataIdentifier identifier) throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            LOG.debug("exists {0}", identifier);
+            ObjectMetadata objectMetaData = s3service.getObjectMetadata(bucket,
+                key);
+            if (objectMetaData != null) {
+                LOG.debug("  true");
+                return true;
+            }
+            return false;
+        } catch (AmazonServiceException e) {
+            if (e.getStatusCode() == 404) {
+                LOG.info("key [" + identifier.toString() + "] not found.");
+                return false;
+            }
+            throw new DataStoreException(
+                "Error occured to getObjectMetadata for key ["
+                    + identifier.toString() + "]", e);
+        }
+    }
+
+    public void touch(DataIdentifier identifier, long minModifiedDate)
+            throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            if (minModifiedDate != 0) {
+                ObjectMetadata objectMetaData = s3service.getObjectMetadata(
+                    bucket, key);
+                if (objectMetaData.getLastModified().getTime() < minModifiedDate) {
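+                    // A copy-in-place request updates the object's
+                    // lastModified timestamp on the server side.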
+                    CopyObjectRequest copReq = new CopyObjectRequest(bucket,
+                        key, bucket, key);
+                    copReq.setNewObjectMetadata(objectMetaData);
+                    s3service.copyObject(copReq);
+                    LOG.debug("lastModified of " + identifier.toString()
+                        + " updated successfully");
+                }
+            }
+        } catch (Exception e) {
+            throw new DataStoreException(
+                "An Exception occurred while trying to set the last modified date of record "
+                    + identifier.toString(), e);
+        }
+    }
+
+    public InputStream read(DataIdentifier identifier)
+            throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            LOG.debug("read {" + identifier + "}");
+            S3Object object = s3service.getObject(bucket, key);
+            InputStream in = object.getObjectContent();
+            LOG.debug("  return");
+            return in;
+        } catch (AmazonServiceException e) {
+            throw new DataStoreException("Object not found: " + key, e);
+        }
+    }
+
+    public Iterator<DataIdentifier> getAllIdentifiers()
+            throws DataStoreException {
+        try {
+            LOG.debug("getAllIdentifiers");
+            Set<DataIdentifier> ids = new HashSet<DataIdentifier>();
+            ObjectListing prevObjectListing = s3service.listObjects(bucket,
+                KEY_PREFIX);
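+            // S3 returns listings in pages: keep fetching the next batch
+            // until the listing is no longer truncated.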
+            while (true) {
+                for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
+                    String id = getIdentifierName(s3ObjSumm.getKey());
+                    if (id != null) {
+                        ids.add(new DataIdentifier(id));
+                    }
+                }
+                if (!prevObjectListing.isTruncated()) {
+                    break;
+                }
+                prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
+            }
+            LOG.debug("  return");
+            return ids.iterator();
+        } catch (AmazonServiceException e) {
+            throw new DataStoreException("Could not list objects", e);
+        }
+    }
+
+    public long getLastModified(DataIdentifier identifier)
+            throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
+            return object.getLastModified().getTime();
+        } catch (AmazonServiceException e) {
+            throw new DataStoreException(
+                "Could not getLastModified of dataIdentifier " + identifier, e);
+        }
+    }
+
+    public long getLength(DataIdentifier identifier) throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
+            return object.getContentLength();
+        } catch (AmazonServiceException e) {
+            throw new DataStoreException("Could not length of dataIdentifier "
+                + identifier, e);
+        }
+    }
+
+    public void deleteRecord(DataIdentifier identifier)
+            throws DataStoreException {
+        String key = getKeyName(identifier);
+        try {
+            s3service.deleteObject(bucket, key);
+        } catch (AmazonServiceException e) {
+            throw new DataStoreException(
+                "Could not getLastModified of dataIdentifier " + identifier, e);
+        }
+    }
+
+    public List<DataIdentifier> deleteAllOlderThan(long min)
+            throws DataStoreException {
+        LOG.info("deleteAllOlderThan " + new Date(min));
+        List<DataIdentifier> diDeleteList = new ArrayList<DataIdentifier>(30);
+        ObjectListing prevObjectListing = s3service.listObjects(bucket);
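+        // Page through the bucket listing, collecting keys that are not in
+        // use and are older than the given time, then delete them in batches.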
+        while (true) {
+            List<DeleteObjectsRequest.KeyVersion> deleteList =
+                new ArrayList<DeleteObjectsRequest.KeyVersion>();
+            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
+                DataIdentifier identifier = new DataIdentifier(
+                    getIdentifierName(s3ObjSumm.getKey()));
+                if (!store.inUse.containsKey(identifier)
+                    && s3ObjSumm.getLastModified().getTime() < min) {
+                    LOG.info("add id :" + s3ObjSumm.getKey()
+                        + " to delete lists");
+                    deleteList.add(new DeleteObjectsRequest.KeyVersion(
+                        s3ObjSumm.getKey()));
+                    diDeleteList.add(new DataIdentifier(
+                        getIdentifierName(s3ObjSumm.getKey())));
+                }
+            }
+            if (deleteList.size() > 0) {
+                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(
+                    bucket);
+                delObjsReq.setKeys(deleteList);
+                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
+                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
+                    throw new DataStoreException(
+                        "Incomplete delete object request. only  "
+                            + dobjs.getDeletedObjects().size() + " out of "
+                            + deleteList.size() + " are deleted");
+                } else {
+                    LOG.info(deleteList.size()
+                        + " records deleted from datastore");
+                }
+            }
+            if (!prevObjectListing.isTruncated()) {
+                break;
+            }
+            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
+        }
+        LOG.info("deleteAllOlderThan  exit");
+        return diDeleteList;
+    }
+
+    public void close() {
+        s3service.shutdown();
+        s3service = null;
+        tmx = null;
+    }
+
+    /**
+     * Gets the key for a data identifier. The object is stored under this
+     * key in S3.
+     */
+    private String getKeyName(DataIdentifier identifier) {
+        return KEY_PREFIX + identifier.toString();
+    }
+
+    /**
+     * Get data identifier from key.
+     */
+    private String getIdentifierName(String key) {
+        if (!key.startsWith(KEY_PREFIX)) {
+            return null;
+        }
+        return key.substring(KEY_PREFIX.length());
+    }
+
+    /**
+     * Returns a new thread pool configured with the default settings.
+     * 
+     * @return A new thread pool configured with the default settings.
+     */
+    private ThreadPoolExecutor createDefaultExecutorService() {
+        ThreadFactory threadFactory = new ThreadFactory() {
+            private int threadCount = 1;
+
+            public Thread newThread(Runnable r) {
+                Thread thread = new Thread(r);
+                thread.setContextClassLoader(getClass().getClassLoader());
+                thread.setName("s3-transfer-manager-worker-" + threadCount++);
+                return thread;
+            }
+        };
+        return (ThreadPoolExecutor) Executors.newFixedThreadPool(10,
+            threadFactory);
+    }
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/ds/S3DataStore.java	(working copy)
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+/**
+ * An Amazon S3 data store.
+ */
+public class S3DataStore extends CachingDataStore {
+
+    protected Backend createBackend() {
+        return new S3Backend();
+    }
+
+    protected String getMarkerFile() {
+        return "s3.init.done";
+    }
+
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/LocalCache.java	(working copy)
@@ -0,0 +1,532 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.aws.ext.ds.CachingDataStore;
+import org.apache.jackrabbit.core.data.LazyFileInputStream;
+import org.apache.jackrabbit.util.TransientFileFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class implements an LRU cache used by {@link CachingDataStore}. If
+ * the cache size exceeds the limit, the cache goes into purge mode. In purge
+ * mode any operation on the cache is a no-op. After a purge, the cache size
+ * is below cachePurgeResizeFactor * maximum size.
+ */
+public class LocalCache {
+
+    /**
+     * Logger instance.
+     */
+    private static final Logger LOG = LoggerFactory.getLogger(LocalCache.class);
+
+    /**
+     * The directory where the files are created.
+     */
+    private final File directory;
+
+    /**
+     * The directory where tmp files are created.
+     */
+    private final File tmp;
+
+    /**
+     * The file names of the files that need to be deleted.
+     */
+    private final Set<String> toBeDeleted = new HashSet<String>();
+
+    /**
+     * The maximum size of cache in bytes.
+     */
+    private long maxSize;
+
+    /**
+     * The filename Vs file size LRU cache.
+     */
+    private LRUCache cache;
+
+    /**
+     * If true, the cache is in purge mode and not available. All operations
+     * are no-ops.
+     */
+    private volatile boolean purgeMode;
+
+    /**
+     * Builds an LRU cache of the files located at 'path', using each file's
+     * lastModified property to establish the LRU order. If the cache size
+     * exceeds the limit, the cache goes into purge mode, in which any
+     * operation on the cache is a no-op.
+     * 
+     * @param path file system path
+     * @param tmpPath temporary directory used by the cache.
+     * @param maxSize maximum size of the cache.
+     * @param cachePurgeTrigFactor factor that triggers purge mode: if the
+     * current size exceeds (cachePurgeTrigFactor * maxSize), the cache goes
+     * into auto-purge mode.
+     * @param cachePurgeResizeFactor after a purge, the cache size will be
+     * just below (cachePurgeResizeFactor * maxSize).
+     * @throws RepositoryException
+     */
+    public LocalCache(final String path, final String tmpPath,
+            final long maxSize, final double cachePurgeTrigFactor,
+            final double cachePurgeResizeFactor) throws RepositoryException {
+        this.maxSize = maxSize;
+        directory = new File(path);
+        tmp = new File(tmpPath);
+        cache = new LRUCache(maxSize, cachePurgeTrigFactor,
+            cachePurgeResizeFactor);
+        ArrayList<File> allFiles = new ArrayList<File>();
+
+        @SuppressWarnings("unchecked")
+        Iterator it = FileUtils.iterateFiles(directory, null, true);
+        while (it.hasNext()) {
+            File f = (File) it.next();
+            allFiles.add(f);
+        }
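+        // Sort by lastModified (oldest first) so the cache is seeded in
+        // least-recently-used order.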
+        Collections.sort(allFiles, new Comparator<File>() {
+            public int compare(final File o1, final File o2) {
+                long l1 = o1.lastModified(), l2 = o2.lastModified();
+                return l1 < l2 ? -1 : l1 > l2 ? 1 : 0;
+            }
+        });
+        String dataStorePath = directory.getAbsolutePath();
+        long time = System.currentTimeMillis();
+        int count = 0;
+        int deletecount = 0;
+        for (File f : allFiles) {
+            if (f.exists()) {
+                long length = f.length();
+                String name = f.getPath();
+                if (name.startsWith(dataStorePath)) {
+                    name = name.substring(dataStorePath.length());
+                }
+                // convert to java path format
+                name = name.replace("\\", "/");
+                if (name.startsWith("/") || name.startsWith("\\")) {
+                    name = name.substring(1);
+                }
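+                // Admit files until the size limit is reached; files that no
+                // longer fit are deleted from the file system.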
+                if ((cache.currentSizeInBytes + length) < cache.maxSizeInBytes) {
+                    count++;
+                    cache.put(name, length);
+                } else {
+                    if (tryDelete(name)) {
+                        deletecount++;
+                    }
+                }
+                long now = System.currentTimeMillis();
+                if (now > time + 5000) {
+                    LOG.info("Processed {" + (count + deletecount) + "}/{"
+                        + allFiles.size() + "}");
+                    time = now;
+                }
+            }
+        }
+        LOG.info("Cached {" + count + "}/{" + allFiles.size()
+            + "} , currentSizeInBytes = " + cache.currentSizeInBytes);
+        LOG.info("Deleted {" + deletecount + "}/{" + allFiles.size()
+            + "} files .");
+    }
+
+    /**
+     * Stores an item in the cache and returns an input stream. If the cache
+     * is in purge mode or the file doesn't exist, an input stream from a
+     * {@link TransientFileFactory#createTransientFile(String, String, File)}
+     * is returned. Otherwise an input stream from the cached file is
+     * returned. This method doesn't close the incoming input stream.
+     * 
+     * @param fileName the key of cache.
+     * @param in the inputstream.
+     * @return the (new) input stream.
+     */
+    public synchronized InputStream store(String fileName, final InputStream in)
+            throws IOException {
+        fileName = fileName.replace("\\", "/");
+        File f = getFile(fileName);
+        long length = 0;
+        if (!f.exists() || isInPurgeMode()) {
+            OutputStream out = null;
+            File transFile = null;
+            try {
+                TransientFileFactory tff = TransientFileFactory.getInstance();
+                transFile = tff.createTransientFile("s3-", "tmp", tmp);
+                out = new BufferedOutputStream(new FileOutputStream(transFile));
+                length = IOUtils.copyLarge(in, out);
+            } finally {
+                IOUtils.closeQuietly(out);
+            }
+            // rename the file to local fs cache
+            if (canAdmitFile(length)
+                && (f.getParentFile().exists() || f.getParentFile().mkdirs())
+                && transFile.renameTo(f) && f.exists()) {
+                if (transFile.exists() && !transFile.delete()) {
+                    LOG.warn("tmp file = " + transFile.getAbsolutePath()
+                        + " not deleted successfully");
+                }
+                transFile = null;
+                toBeDeleted.remove(fileName);
+                if (cache.get(fileName) == null) {
+                    cache.put(fileName, f.length());
+                }
+            } else {
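+                // The file was not admitted into the cache (purge mode, size
+                // limit, or failed rename): serve the transient file instead.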
+                f = transFile;
+            }
+        } else {
+            // f.exists and not in purge mode
+            f.setLastModified(System.currentTimeMillis());
+            toBeDeleted.remove(fileName);
+            if (cache.get(fileName) == null) {
+                cache.put(fileName, f.length());
+            }
+        }
+        cache.tryPurge();
+        return new LazyFileInputStream(f);
+    }
+
+    /**
+     * Stores an item along with its file in the cache. The cache size is
+     * increased by {@link File#length()}. If the file already exists in the
+     * cache, its last modified time is updated to the current time.
+     * 
+     * @param fileName the key of cache.
+     * @param src file to be added to cache.
+     * @throws IOException
+     */
+    public synchronized void store(String fileName, final File src)
+            throws IOException {
+        fileName = fileName.replace("\\", "/");
+        File dest = getFile(fileName);
+        File parent = dest.getParentFile();
+        if (src.exists() && !dest.exists() && !src.equals(dest)
+            && canAdmitFile(src.length())
+            && (parent.exists() || parent.mkdirs()) && (src.renameTo(dest))) {
+            toBeDeleted.remove(fileName);
+            if (cache.get(fileName) == null) {
+                cache.put(fileName, dest.length());
+            }
+        } else if (dest.exists()) {
+            dest.setLastModified(System.currentTimeMillis());
+            toBeDeleted.remove(fileName);
+            if (cache.get(fileName) == null) {
+                cache.put(fileName, dest.length());
+            }
+        }
+        cache.tryPurge();
+    }
+
+    /**
+     * Return the input stream from the cache, or null if the item is not in
+     * the cache.
+     * 
+     * @param fileName name of the file.
+     * @return stream or null.
+     */
+    public InputStream getIfStored(String fileName) throws IOException {
+        fileName = fileName.replace("\\", "/");
+        File f = getFile(fileName);
+        synchronized (this) {
+            if (!f.exists() || isInPurgeMode()) {
+                log("purgeMode true or file doesn't exists: getIfStored returned");
+                return null;
+            } else {
+                f.setLastModified(System.currentTimeMillis());
+                return new LazyFileInputStream(f);
+            }
+        }
+    }
+
+    /**
+     * Delete a file from the cache. The cache size is reduced by the file
+     * length. This method is a no-op if the file doesn't exist in the cache.
+     * 
+     * @param fileName name of the file to be removed from the cache.
+     */
+    public synchronized void delete(String fileName) {
+        if (isInPurgeMode()) {
+            log("purgeMode true :delete returned");
+            return;
+        }
+        fileName = fileName.replace("\\", "/");
+        cache.remove(fileName);
+    }
+
+    /**
+     * Returns the length of the file if it exists in the cache, otherwise
+     * returns null.
+     * @param fileName name of the file.
+     */
+    public Long getFileLength(String fileName) {
+        fileName = fileName.replace("\\", "/");
+        File f = getFile(fileName);
+        synchronized (this) {
+            if (!f.exists() || isInPurgeMode()) {
+                log("purgeMode true or file doesn't exists: getFileLength returned");
+                return null;
+            }
+            f.setLastModified(System.currentTimeMillis());
+            return f.length();
+        }
+    }
+
+    /**
+     * Close the cache. The cache maintains the set of files which it could
+     * not delete successfully. This method makes an attempt to delete all
+     * such files.
+     */
+    public void close() {
+        log("close");
+        deleteOldFiles();
+    }
+
+    /**
+     * Check if the cache can admit a file of the given length.
+     * @param length length of the file.
+     * @return true if the file can be admitted, false otherwise.
+     */
+    private synchronized boolean canAdmitFile(final long length) {
+        // order is important here
+        boolean value = !isInPurgeMode() && (cache.canAdmitFile(length));
+        if (!value) {
+            log("cannot admit file of length=" + length
+                + " and currentSizeInBytes=" + cache.currentSizeInBytes);
+        }
+        return value;
+    }
+
+    /**
+     * Return true if the cache is in purge mode, false otherwise.
+     */
+    private synchronized boolean isInPurgeMode() {
+        return purgeMode || maxSize == 0;
+    }
+
+    /**
+     * Set the purge mode. If set to true, all cache operations are no-ops. If
+     * set to false, all cache operations are available.
+     * 
+     * @param purgeMode purge mode
+     */
+    private synchronized void setPurgeMode(final boolean purgeMode) {
+        this.purgeMode = purgeMode;
+    }
+
+    private File getFile(final String fileName) {
+        return new File(directory, fileName);
+    }
+
+    private void deleteOldFiles() {
+        int initialSize = toBeDeleted.size();
+        int count = 0;
+        for (String n : new ArrayList<String>(toBeDeleted)) {
+            if (tryDelete(n)) {
+                count++;
+            }
+        }
+        LOG.info("deleted [" + count + "]/[" + initialSize + "] files");
+    }
+
+    /**
+     * This method tries to delete a file. If it is not able to delete the
+     * file for any reason, it adds the file to the toBeDeleted list.
+     * 
+     * @param fileName name of the file to be deleted.
+     * @return true if the file is deleted successfully, false otherwise.
+     */
+    private boolean tryDelete(final String fileName) {
+        log("cache delete " + fileName);
+        File f = getFile(fileName);
+        if (f.exists() && f.delete()) {
+            log(fileName + "  deleted successfully");
+            toBeDeleted.remove(fileName);
+            while (true) {
+                f = f.getParentFile();
+                // f.list() may return null if the directory vanished meanwhile
+                String[] children = f.list();
+                if (f.equals(directory) || children == null
+                    || children.length > 0) {
+                    break;
+                }
+                // delete empty parent folders (except the main directory)
+                f.delete();
+            }
+            return true;
+        } else if (f.exists()) {
+            LOG.info("not able to delete file = " + f.getAbsolutePath());
+            toBeDeleted.add(fileName);
+            return false;
+        }
+        return true;
+    }
+
+    private static int maxSizeElements(final long bytes) {
+        // after a CQ installation, the average item in
+        // the data store is about 52 KB
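+        // sizing sketch: one map entry per ~64 KB of cache space, clamped
+        // to [1024, 65536] entries; e.g. a 1 GB cache maps to 16384 entries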
+        int count = (int) (bytes / 65535);
+        count = Math.max(1024, count);
+        count = Math.min(64 * 1024, count);
+        return count;
+    }
+
+    private void log(final String s) {
+        LOG.debug(s);
+    }
+
+    /**
+     * An LRU-based extension of {@link LinkedHashMap}. The key is the file
+     * name and the value is the length of the file.
+     */
+    private class LRUCache extends LinkedHashMap<String, Long> {
+        private static final long serialVersionUID = 1L;
+
+        private volatile long currentSizeInBytes;
+
+        private final long maxSizeInBytes;
+
+        private long cachePurgeTrigSize;
+
+        private long cachePurgeResize;
+
+        public LRUCache(final long maxSizeInBytes,
+                final double cachePurgeTrigFactor,
+                final double cachePurgeResizeFactor) {
+            super(maxSizeElements(maxSizeInBytes), (float) 0.75, true);
+            this.maxSizeInBytes = maxSizeInBytes;
+            this.cachePurgeTrigSize = (long) (cachePurgeTrigFactor
+                * maxSizeInBytes);
+            this.cachePurgeResize = (long) (cachePurgeResizeFactor
+                * maxSizeInBytes);
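+            // e.g. with maxSizeInBytes = 400, cachePurgeTrigFactor = 0.95 and
+            // cachePurgeResizeFactor = 0.70 (the values used in
+            // TestLocalCache), purging is triggered above 380 bytes and
+            // resizes the cache to below 280 bytes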
+        }
+
+        /**
+         * Overridden {@link Map#remove(Object)} to delete the corresponding
+         * file from the file system.
+         */
+        @Override
+        public synchronized Long remove(final Object key) {
+            String fileName = (String) key;
+            fileName = fileName.replace("\\", "/");
+            Long flength = null;
+            if (tryDelete(fileName)) {
+                flength = super.remove(key);
+                if (flength != null) {
+                    log("cache entry { " + fileName + "} with size {" + flength
+                        + "} removed.");
+                    currentSizeInBytes -= flength.longValue();
+                }
+            } else if (!getFile(fileName).exists()) {
+                // second attempt: remove from cache if the file doesn't exist
+                flength = super.remove(key);
+                if (flength != null) {
+                    log("file doesn't exist. cache entry {" + fileName
+                        + "} with size {" + flength + "} removed.");
+                    currentSizeInBytes -= flength.longValue();
+                }
+            }
+            return flength;
+        }
+
+        @Override
+        public synchronized Long put(final String key, final Long value) {
+            long flength = value.longValue();
+            currentSizeInBytes += flength;
+            return super.put(key.replace("\\", "/"), value);
+        }
+
+        /**
+         * This method tries to purge the local cache. If the local cache has
+         * exceeded the configured purge trigger size, it starts the purge job
+         * in a separate thread.
+         */
+        private synchronized void tryPurge() {
+            if (currentSizeInBytes > cachePurgeTrigSize && !isInPurgeMode()) {
+                setPurgeMode(true);
+                LOG.info("currentSizeInBytes[" + cache.currentSizeInBytes
+                    + "] exceeds (cachePurgeTrigSize)["
+                    + cache.cachePurgeTrigSize + "]");
+                new Thread(new PurgeJob()).start();
+            }
+        }
+
+        /**
+         * This method checks if the cache can admit a file of the given
+         * length.
+         * @param length length of the file.
+         * @return true if cache size + length is less than maxSize.
+         */
+        private synchronized boolean canAdmitFile(final long length) {
+            return (cache.currentSizeInBytes + length < cache.maxSizeInBytes);
+        }
+    }
+
+    /**
+     * This class performs purging of local cache. It implements
+     * {@link Runnable} and should be invoked in a separate thread.
+     */
+    private class PurgeJob implements Runnable {
+        /**
+         * This method purges the local cache until its size drops below
+         * cachePurgeResizeFactor * maxSize.
+         */
+        public void run() {
+            try {
+                synchronized (cache) {
+                    LOG.info(" cache purge job started");
+                    // first try to delete toBeDeleted files
+                    int initialSize = cache.size();
+                    for (String fileName : new ArrayList<String>(toBeDeleted)) {
+                        cache.remove(fileName);
+                    }
+                    Iterator> itr = cache.entrySet().iterator();
+                    while (itr.hasNext()) {
+                        Map.Entry entry = itr.next();
+                        if (entry.getKey() != null) {
+                            if (cache.currentSizeInBytes > cache.cachePurgeResize) {
+                                itr.remove();
+                            } else {
+                                break;
+                            }
+                        }
+                    }
+                    LOG.info(" cache purge job completed: cleaned ["
+                        + (initialSize - cache.size())
+                        + "] files and currentSizeInBytes = [ "
+                        + cache.currentSizeInBytes + "]");
+                }
+            } catch (Exception e) {
+                LOG.error("error in purge jobs:", e);
+            } finally {
+                setPurgeMode(false);
+            }
+        }
+    }
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/S3Constants.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/S3Constants.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/S3Constants.java	(working copy)
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext;
+
+/**
+ * Defines the Amazon S3 configuration constants.
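+ * <p>
+ * These keys are read from the aws.properties configuration file. A typical
+ * fragment looks like this (all values are placeholders):
+ * <pre>
+ * accessKey=AKIA...
+ * secretKey=...
+ * s3Bucket=my-jackrabbit-bucket
+ * s3Region=us-west-2
+ * </pre>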
+ */
+public final class S3Constants {
+    
+    /**
+     * Private constructor so that the class cannot be instantiated from
+     * outside.
+     */
+    private S3Constants() {
+
+    }
+
+    /**
+     * Amazon aws access key.
+     */
+    public static final String ACCESS_KEY = "accessKey";
+
+    /**
+     * Amazon aws secret key.
+     */
+    public static final String SECRET_KEY = "secretKey";
+
+    /**
+     * Amazon aws S3 bucket.
+     */
+    public static final String S3_BUCKET = "s3Bucket";
+
+    /**
+     * Amazon aws S3 region.
+     */
+    public static final String S3_REGION = "s3Region";
+   
+}
Index: jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/Utils.java
===================================================================
--- jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/Utils.java	(revision 0)
+++ jackrabbit-aws-ext/src/main/java/org/apache/jackrabbit/aws/ext/Utils.java	(working copy)
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.amazonaws.ClientConfiguration;
+import com.amazonaws.auth.AWSCredentials;
+import com.amazonaws.auth.BasicAWSCredentials;
+import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.model.DeleteObjectsRequest;
+import com.amazonaws.services.s3.model.DeleteObjectsResult;
+import com.amazonaws.services.s3.model.ObjectListing;
+import com.amazonaws.services.s3.model.S3ObjectSummary;
+
+/**
+ * Amazon S3 utilities.
+ */
+public final class Utils {
+
+    private static final Logger LOG = LoggerFactory.getLogger(Utils.class);
+
+    public static final String DEFAULT_CONFIG_FILE = "aws.properties";
+
+    private static final String DELETE_CONFIG_SUFFIX = ";burn";
+
+    /**
+     * Private constructor so that the class cannot be instantiated from
+     * outside.
+     */
+    private Utils() {
+
+    }
+
+    /**
+     * Creates an {@link AmazonS3Client} from the given properties.
+     * 
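+     * Besides {@link S3Constants#ACCESS_KEY} and
+     * {@link S3Constants#SECRET_KEY}, the following client tuning keys are
+     * read from the properties (a sketch; the values shown are illustrative):
+     * <pre>
+     * connectionTimeout=120000
+     * socketTimeout=120000
+     * maxConnections=20
+     * maxErrorRetry=10
+     * </pre>
+     * 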
+     * @param prop properties to configure the {@link AmazonS3Client}
+     * @return {@link AmazonS3Client}
+     */
+    public static AmazonS3Client openService(final Properties prop) {
+        AWSCredentials credentials = new BasicAWSCredentials(
+            prop.getProperty(S3Constants.ACCESS_KEY),
+            prop.getProperty(S3Constants.SECRET_KEY));
+        int connectionTimeOut = Integer.parseInt(prop.getProperty("connectionTimeout"));
+        int socketTimeOut = Integer.parseInt(prop.getProperty("socketTimeout"));
+        int maxConnections = Integer.parseInt(prop.getProperty("maxConnections"));
+        int maxErrorRetry = Integer.parseInt(prop.getProperty("maxErrorRetry"));
+        ClientConfiguration cc = new ClientConfiguration();
+        cc.setConnectionTimeout(connectionTimeOut);
+        cc.setSocketTimeout(socketTimeOut);
+        cc.setMaxConnections(maxConnections);
+        cc.setMaxErrorRetry(maxErrorRetry);
+        return new AmazonS3Client(credentials, cc);
+    }
+
+    /**
+     * Delete an S3 bucket. This method first deletes all objects from the
+     * bucket and then deletes the empty bucket itself.
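+     * <p>
+     * A usage sketch, mirroring the teardown of the S3 tests (the file path
+     * is illustrative):
+     * <pre>
+     * Properties props = Utils.readConfig("/opt/cq/aws.properties");
+     * Utils.deleteBucket(props);
+     * </pre>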
+     * 
+     * @param prop properties to configure the {@link AmazonS3Client} and
+     * identify the bucket to delete.
+     */
+    public static void deleteBucket(final Properties prop) throws IOException {
+        AmazonS3Client s3service = openService(prop);
+        String bucketName = prop.getProperty(S3Constants.S3_BUCKET);
+        if (!s3service.doesBucketExist(bucketName)) {
+            return;
+        }
+        ObjectListing prevObjectListing = s3service.listObjects(bucketName);
+        while (true) {
+            List<DeleteObjectsRequest.KeyVersion> deleteList =
+                new ArrayList<DeleteObjectsRequest.KeyVersion>();
+            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
+                deleteList.add(new DeleteObjectsRequest.KeyVersion(
+                    s3ObjSumm.getKey()));
+            }
+            if (deleteList.size() > 0) {
+                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(
+                    bucketName);
+                delObjsReq.setKeys(deleteList);
+                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
+                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
+                    throw new IOException(
+                        "Incomplete delete object request: only "
+                            + dobjs.getDeletedObjects().size() + " out of "
+                            + deleteList.size() + " objects were deleted");
+                } else {
+                    LOG.info("{} records deleted from the data store",
+                        deleteList.size());
+                }
+            }
+            if (!prevObjectListing.isTruncated()) {
+                break;
+            }
+            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
+        }
+        s3service.deleteBucket(bucketName);
+        s3service.shutdown();
+    }
+
+    /**
+     * Read a configuration properties file. If the file name ends with ";burn",
+     * the file is deleted after reading.
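+     * For example (the path is illustrative), passing
+     * "/opt/cq/aws.properties;burn" loads /opt/cq/aws.properties and then
+     * deletes the file once it has been read.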
+     * 
+     * @param fileName the properties file name
+     * @return the properties
+     * @throws IOException if the file doesn't exist
+     */
+    public static Properties readConfig(String fileName) throws IOException {
+        boolean delete = false;
+        if (fileName.endsWith(DELETE_CONFIG_SUFFIX)) {
+            delete = true;
+            fileName = fileName.substring(0, fileName.length()
+                - DELETE_CONFIG_SUFFIX.length());
+        }
+        if (!new File(fileName).exists()) {
+            throw new IOException("Config file not found: " + fileName);
+        }
+        Properties prop = new Properties();
+        InputStream in = null;
+        try {
+            in = new FileInputStream(fileName);
+            prop.load(in);
+        } finally {
+            if (in != null) {
+                in.close();
+            }
+            if (delete) {
+                deleteIfPossible(new File(fileName));
+            }
+        }
+        return prop;
+    }
+
+    private static void deleteIfPossible(final File file) {
+        boolean deleted = file.delete();
+        if (!deleted) {
+            LOG.warn("Could not delete " + file.getAbsolutePath());
+        }
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryBackend.java	(working copy)
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataStoreException;
+
+/**
+ * An in-memory backend implementation used to speed up testing.
+ */
+public class InMemoryBackend implements Backend {
+
+    private HashMap<DataIdentifier, byte[]> data =
+        new HashMap<DataIdentifier, byte[]>();
+
+    private HashMap<DataIdentifier, Long> timeMap =
+        new HashMap<DataIdentifier, Long>();
+
+    public void init(CachingDataStore store, String homeDir, String config)
+            throws DataStoreException {
+        // ignore
+        log("init");
+    }
+
+    public void close() {
+        // ignore
+        log("close");
+    }
+
+    public boolean exists(final DataIdentifier identifier) {
+        log("exists " + identifier);
+        return data.containsKey(identifier);
+    }
+
+    public Iterator<DataIdentifier> getAllIdentifiers()
+            throws DataStoreException {
+        log("getAllIdentifiers");
+        return data.keySet().iterator();
+    }
+
+    public InputStream read(final DataIdentifier identifier)
+            throws DataStoreException {
+        log("read " + identifier);
+        return new ByteArrayInputStream(data.get(identifier));
+    }
+
+    public void write(final DataIdentifier identifier, final File file)
+            throws DataStoreException {
+        log("write " + identifier + " " + file.length());
+        byte[] buffer = new byte[(int) file.length()];
+        try {
+            DataInputStream din = new DataInputStream(new FileInputStream(file));
+            try {
+                din.readFully(buffer);
+            } finally {
+                din.close();
+            }
+            data.put(identifier, buffer);
+            timeMap.put(identifier, System.currentTimeMillis());
+        } catch (IOException e) {
+            throw new DataStoreException(e);
+        }
+    }
+
+    private void log(final String message) {
+        // System.out.println(message);
+    }
+
+    public long getLastModified(final DataIdentifier identifier)
+            throws DataStoreException {
+        log("getLastModified " + identifier);
+        return timeMap.get(identifier);
+    }
+
+    public void deleteRecord(final DataIdentifier identifier)
+            throws DataStoreException {
+        timeMap.remove(identifier);
+        data.remove(identifier);
+    }
+
+    public List<DataIdentifier> deleteAllOlderThan(final long min) {
+        log("deleteAllOlderThan " + min);
+        List<DataIdentifier> tobeDeleted = new ArrayList<DataIdentifier>();
+        for (Map.Entry<DataIdentifier, Long> entry : timeMap.entrySet()) {
+            DataIdentifier identifier = entry.getKey();
+            long timestamp = entry.getValue();
+            if (timestamp < min) {
+                tobeDeleted.add(identifier);
+            }
+        }
+        for (DataIdentifier identifier : tobeDeleted) {
+            timeMap.remove(identifier);
+            data.remove(identifier);
+        }
+        return tobeDeleted;
+    }
+
+    public long getLength(final DataIdentifier identifier) throws DataStoreException {
+        try {
+            return data.get(identifier).length;
+        } catch (Exception e) {
+            throw new DataStoreException(e);
+        }
+    }
+
+    public void touch(final DataIdentifier identifier, final long minModifiedDate)
+            throws DataStoreException {
+        if (minModifiedDate > 0 && data.containsKey(identifier)) {
+            timeMap.put(identifier, System.currentTimeMillis());
+        }
+    }
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/InMemoryDataStore.java	(working copy)
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+/**
+ * A caching data store that uses the in-memory backend.
+ */
+public class InMemoryDataStore extends CachingDataStore {
+
+    Backend createBackend() {
+        return new InMemoryBackend();
+    }
+
+    String getMarkerFile() {
+        return "mem.init.done";
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestCaseBase.java	(working copy)
@@ -0,0 +1,614 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.aws.ext.LocalCache;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.core.data.MultiDataStoreAware;
+import org.apache.jackrabbit.core.data.RandomInputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test base class which covers all scenarios.
+ */
+public abstract class TestCaseBase extends TestCase {
+
+    /**
+     * temp directory
+     */
+    private static final String TEST_DIR = "target/temp";
+
+    /**
+     * Constant describing aws properties file path.
+     */
+    public static String CONFIG = "config";
+
+    /**
+     * File path of aws properties.
+     */
+    protected String config;
+
+    /**
+     * Parameter to use the in-memory backend. If false, {@link S3Backend} is
+     * used.
+    protected boolean memoryBackend = true;
+
+    /**
+     * Parameter to disable the local cache. If true, the local cache
+     * {@link LocalCache} is not used.
+     */
+    protected boolean noCache;
+
+    /**
+     * Logger
+     */
+    protected static final Logger LOG = LoggerFactory.getLogger(TestCaseBase.class);
+
+    /**
+     * Delete temporary directory.
+     */
+    protected void setUp() {
+        FileUtils.deleteQuietly(new File(TEST_DIR));
+    }
+
+    /**
+     * Delete temporary directory.
+     */
+    protected void tearDown() throws IOException {
+        FileUtils.deleteQuietly(new File(TEST_DIR));
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#addRecord(InputStream)} API.
+     */
+    public void testAddRecord() {
+        try {
+            doAddRecordTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#getRecord(DataIdentifier)} API.
+     */
+    public void testGetRecord() {
+        try {
+            doGetRecordTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+        }
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#getAllIdentifiers()} API.
+     */
+    public void testGetAllIdentifiers() {
+        try {
+            doGetAllIdentifiersTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#updateModifiedDateOnAccess(long)}
+     * API.
+     */
+    public void testUpdateLastModifiedOnAccess() {
+        try {
+            doUpdateLastModifiedOnAccessTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+        }
+    }
+
+    /**
+     * Testcase to validate
+     * {@link MultiDataStoreAware#deleteRecord(DataIdentifier)} API.
+     */
+    public void testDeleteRecord() {
+        try {
+            doDeleteRecordTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#deleteAllOlderThan(long)} API.
+     */
+    public void testDeleteAllOlderThan() {
+        try {
+            doDeleteAllOlderThan();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate {@link DataStore#getRecordFromReference(String)}.
+     */
+    public void testReference() {
+        try {
+            doReferenceTest();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate mixed scenario use of {@link DataStore}.
+     */
+    public void testSingleThread() {
+        try {
+            doTestSingleThread();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Testcase to validate mixed scenario use of {@link DataStore} in
+     * multi-threaded concurrent environment.
+     */
+    public void testMultiThreaded() {
+        try {
+            doTestMultiThreaded();
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Test {@link DataStore#addRecord(InputStream)} and assert length of added
+     * record.
+     */
+    protected void doAddRecordTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        byte[] data = new byte[12345];
+        new Random(12345).nextBytes(data);
+        DataRecord rec = ds.addRecord(new ByteArrayInputStream(data));
+        assertEquals(data.length, rec.getLength());
+        assertRecord(data, rec);
+        ds.close();
+    }
+
+    /**
+     * Test {@link DataStore#getRecord(DataIdentifier)} and assert length and
+     * inputstream.
+     */
+    protected void doGetRecordTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        byte[] data = new byte[12345];
+        new Random(12345).nextBytes(data);
+        DataRecord rec = ds.addRecord(new ByteArrayInputStream(data));
+        rec = ds.getRecord(rec.getIdentifier());
+        assertEquals(data.length, rec.getLength());
+        assertRecord(data, rec);
+        ds.close();
+    }
+
+    /**
+     * Test {@link MultiDataStoreAware#deleteRecord(DataIdentifier)}.
+     */
+    protected void doDeleteRecordTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        Random random = new Random(12345);
+        byte[] data1 = new byte[12345];
+        random.nextBytes(data1);
+        DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data1));
+
+        byte[] data2 = new byte[12345];
+        random.nextBytes(data2);
+        DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data2));
+
+        byte[] data3 = new byte[12345];
+        random.nextBytes(data3);
+        DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data3));
+
+        ds.deleteRecord(rec2.getIdentifier());
+
+        assertNull("rec2 should be null",
+            ds.getRecordIfStored(rec2.getIdentifier()));
+        assertEquals(new ByteArrayInputStream(data1),
+            ds.getRecord(rec1.getIdentifier()).getStream());
+        assertEquals(new ByteArrayInputStream(data3),
+            ds.getRecord(rec3.getIdentifier()).getStream());
+        ds.close();
+    }
+
+    /**
+     * Test {@link DataStore#getAllIdentifiers()} and asserts all identifiers
+     * are returned.
+     */
+    protected void doGetAllIdentifiersTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        List<DataIdentifier> list = new ArrayList<DataIdentifier>();
+        Random random = new Random(12345);
+        byte[] data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec = ds.addRecord(new ByteArrayInputStream(data));
+        list.add(rec.getIdentifier());
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        rec = ds.addRecord(new ByteArrayInputStream(data));
+        list.add(rec.getIdentifier());
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        rec = ds.addRecord(new ByteArrayInputStream(data));
+        list.add(rec.getIdentifier());
+
+        Iterator<DataIdentifier> itr = ds.getAllIdentifiers();
+        while (itr.hasNext()) {
+            assertTrue("record found on list", list.remove(itr.next()));
+        }
+        assertEquals(0, list.size());
+        ds.close();
+    }
+
+    /**
+     * Asserts that the timestamps of all records accessed after a
+     * {@link DataStore#updateModifiedDateOnAccess(long)} invocation are
+     * updated.
+     */
+    protected void doUpdateLastModifiedOnAccessTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        Random random = new Random(12345);
+        byte[] data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data));
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data));
+
+        Thread.sleep(1000);
+        long updateTime = System.currentTimeMillis();
+        ds.updateModifiedDateOnAccess(updateTime);
+        Thread.sleep(1000);
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data));
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data));
+
+        rec1 = ds.getRecord(rec1.getIdentifier());
+
+        assertEquals("rec1 touched", true,
+            ds.getLastModified(rec1.getIdentifier()) > updateTime);
+        assertEquals("rec2 not touched", true,
+            ds.getLastModified(rec2.getIdentifier()) < updateTime);
+        assertEquals("rec3 touched", true,
+            ds.getLastModified(rec3.getIdentifier()) > updateTime);
+        assertEquals("rec4 touched", true,
+            ds.getLastModified(rec4.getIdentifier()) > updateTime);
+        ds.close();
+    }
+
+    /**
+     * Asserts that {@link DataStore#deleteAllOlderThan(long)} deletes only
+     * records older than the argument passed.
+     */
+    protected void doDeleteAllOlderThan() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        Random random = new Random(12345);
+        byte[] data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data));
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data));
+        
+        Thread.sleep(2000);
+        long updateTime = System.currentTimeMillis();
+        ds.updateModifiedDateOnAccess(updateTime);
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data));
+
+        data = new byte[12345];
+        random.nextBytes(data);
+        DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data));
+
+        rec1 = ds.getRecord(rec1.getIdentifier());
+        ds.clearInUse();
+        assertEquals("only rec2 should be deleted", 1,
+            ds.deleteAllOlderThan(updateTime));
+        assertNull("rec2 should be null",
+            ds.getRecordIfStored(rec2.getIdentifier()));
+
+        Iterator<DataIdentifier> itr = ds.getAllIdentifiers();
+        List<DataIdentifier> list = new ArrayList<DataIdentifier>();
+        list.add(rec1.getIdentifier());
+        list.add(rec3.getIdentifier());
+        list.add(rec4.getIdentifier());
+        while (itr.hasNext()) {
+            assertTrue("record found on list", list.remove(itr.next()));
+        }
+
+        assertEquals("touched records found", 0, list.size());
+        assertEquals("rec1 touched", true,
+            ds.getLastModified(rec1.getIdentifier()) > updateTime);
+        assertEquals("rec3 touched", true,
+            ds.getLastModified(rec3.getIdentifier()) > updateTime);
+        assertEquals("rec4 touched", true,
+            ds.getLastModified(rec4.getIdentifier()) > updateTime);
+        ds.close();
+    }
+
+    /**
+     * Test that a record can be accessed via
+     * {@link DataStore#getRecordFromReference(String)}.
+     */
+    public void doReferenceTest() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        ds.setSecret("12345");
+        byte[] data = new byte[12345];
+        new Random(12345).nextBytes(data);
+        String reference;
+        DataRecord record = ds.addRecord(new ByteArrayInputStream(data));
+        reference = record.getReference();
+        assertReference(data, reference, ds);
+        ds.close();
+    }
+
+    /**
+     * Method to validate mixed scenario use of {@link DataStore}.
+     */
+    protected void doTestSingleThread() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        doTestMultiThreaded(ds, 1);
+        ds.close();
+    }
+
+    /**
+     * Method to validate mixed scenario use of {@link DataStore} in
+     * multi-threaded concurrent environment.
+     */
+    protected void doTestMultiThreaded() throws Exception {
+        CachingDataStore ds = memoryBackend
+                ? new InMemoryDataStore()
+                : new S3DataStore();
+        ds.setConfig(config);
+        if (noCache) ds.setCacheSize(0);
+        ds.init(TEST_DIR);
+        doTestMultiThreaded(ds, 4);
+        ds.close();
+    }
+
+    /**
+     * Assert that the content of a record matches the given byte array.
+     */
+    protected void assertRecord(byte[] expected, DataRecord record)
+            throws DataStoreException, IOException {
+        InputStream stream = record.getStream();
+        try {
+            for (int i = 0; i < expected.length; i++) {
+                assertEquals(expected[i] & 0xff, stream.read());
+            }
+            assertEquals(-1, stream.read());
+        } finally {
+            stream.close();
+        }
+    }
+
+    /**
+     * Method to run {@link TestCaseBase#doTest(DataStore, int)} in multiple
+     * concurrent threads.
+     */
+    protected void doTestMultiThreaded(final DataStore ds, int threadCount)
+            throws Exception {
+        final Exception[] exception = new Exception[1];
+        Thread[] threads = new Thread[threadCount];
+        for (int i = 0; i < threadCount; i++) {
+            final int x = i;
+            Thread t = new Thread() {
+                public void run() {
+                    try {
+                        doTest(ds, x);
+                    } catch (Exception e) {
+                        exception[0] = e;
+                    }
+                }
+            };
+            threads[i] = t;
+            t.start();
+        }
+        for (int i = 0; i < threadCount; i++) {
+            threads[i].join();
+        }
+        if (exception[0] != null) {
+            throw exception[0];
+        }
+    }
+
+    /**
+     * Add records and assert that streams read back from the records, partly
+     * at random offsets, match the expected content.
+     */
+    void doTest(DataStore ds, int offset) throws Exception {
+        LOG.info(Thread.currentThread().getName() + " started.");
+        ArrayList<DataRecord> list = new ArrayList<DataRecord>();
+        HashMap<DataRecord, Integer> map = new HashMap<DataRecord, Integer>();
+        for (int i = 0; i < 100; i++) {
+            int size = 100 + i * 10;
+            RandomInputStream in = new RandomInputStream(size + offset, size);
+            DataRecord rec = ds.addRecord(in);
+            list.add(rec);
+            map.put(rec, new Integer(size));
+        }
+        Random random = new Random(1);
+        for (int i = 0; i < list.size(); i++) {
+            int pos = random.nextInt(list.size());
+            DataRecord rec = list.get(pos);
+            int size = map.get(rec);
+            rec = ds.getRecord(rec.getIdentifier());
+            assertEquals(size, rec.getLength());
+            InputStream in = rec.getStream();
+            RandomInputStream expected = new RandomInputStream(size + offset,
+                size);
+            if (random.nextBoolean()) {
+                in = readInputStreamRandomly(in, random);
+            }
+            assertEquals(expected, in);
+            in.close();
+        }
+        LOG.info(Thread.currentThread().getName() + " finished.");
+    }
+
+    InputStream readInputStreamRandomly(InputStream in, Random random)
+            throws IOException {
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        byte[] buffer = new byte[8000];
+        while (true) {
+            if (random.nextBoolean()) {
+                int x = in.read();
+                if (x < 0) {
+                    break;
+                }
+                out.write(x);
+            } else {
+                if (random.nextBoolean()) {
+                    int l = in.read(buffer);
+                    if (l < 0) {
+                        break;
+                    }
+                    out.write(buffer, 0, l);
+                } else {
+                    int offset = random.nextInt(buffer.length / 2);
+                    int len = random.nextInt(buffer.length / 2);
+                    int l = in.read(buffer, offset, len);
+                    if (l < 0) {
+                        break;
+                    }
+                    out.write(buffer, offset, l);
+                }
+            }
+        }
+        in.close();
+        return new ByteArrayInputStream(out.toByteArray());
+    }
+
+    /**
+     * Assert that two input streams have identical content.
+     */
+    protected void assertEquals(InputStream a, InputStream b)
+            throws IOException {
+        while (true) {
+            int ai = a.read();
+            int bi = b.read();
+            assertEquals(ai, bi);
+            if (ai < 0) {
+                break;
+            }
+        }
+    }
+
+    /**
+     * Assert inputstream read from reference.
+     */
+    protected void assertReference(byte[] expected, String reference,
+            DataStore store) throws Exception {
+        DataRecord record = store.getRecordFromReference(reference);
+        assertNotNull(record);
+        assertEquals(expected.length, record.getLength());
+
+        InputStream stream = record.getStream();
+        try {
+            for (int i = 0; i < expected.length; i++) {
+                assertEquals(expected[i] & 0xff, stream.read());
+            }
+            assertEquals(-1, stream.read());
+        } finally {
+            stream.close();
+        }
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDs.java	(working copy)
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link CachingDataStore} with InMemoryBackend and local cache on.
+ */
+public class TestInMemDs extends TestCaseBase {
+
+    protected static final Logger LOG = LoggerFactory.getLogger(TestInMemDs.class);
+
+    public TestInMemDs() {
+        config = null;
+        memoryBackend = true;
+        noCache = false;
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestInMemDsCacheOff.java	(working copy)
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link CachingDataStore} with InMemoryBackend and local cache off.
+ */
+public class TestInMemDsCacheOff extends TestCaseBase {
+
+    protected static final Logger LOG = LoggerFactory.getLogger(TestInMemDsCacheOff.class);
+
+    public TestInMemDsCacheOff() {
+        config = null;
+        memoryBackend = true;
+        noCache = true;
+    }
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3Ds.java	(working copy)
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+import java.io.IOException;
+import java.util.Properties;
+
+import org.apache.jackrabbit.aws.ext.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link CachingDataStore} with S3Backend and local cache on. It requires
+ * the aws config file to be passed via a system property, e.g.
+ * -Dconfig=/opt/cq/aws.properties. A sample aws properties file is located at
+ * src/test/resources/aws.properties.
+ */
+public class TestS3Ds extends TestCaseBase {
+
+    protected static final Logger LOG = LoggerFactory.getLogger(TestS3Ds.class);
+
+    public TestS3Ds() {
+        config = System.getProperty(CONFIG);
+        memoryBackend = false;
+        noCache = false;
+    }
+
+    protected void tearDown() throws IOException {
+        super.tearDown();
+        Properties props = Utils.readConfig(config);
+        Utils.deleteBucket(props);
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/ds/TestS3DsCacheOff.java	(working copy)
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext.ds;
+
+/**
+ * Test {@link CachingDataStore} with S3Backend and local cache off. It
+ * requires the aws config file to be passed via a system property, e.g.
+ * -Dconfig=/opt/cq/aws.properties. A sample aws properties file is located at
+ * src/test/resources/aws.properties.
+ */
+public class TestS3DsCacheOff extends TestS3Ds {
+
+    public TestS3DsCacheOff() {
+        config = System.getProperty(CONFIG);
+        memoryBackend = false;
+        noCache = true;
+    }
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestAll.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestAll.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestAll.java	(working copy)
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jackrabbit.aws.ext;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import org.apache.jackrabbit.aws.ext.ds.TestCaseBase;
+import org.apache.jackrabbit.aws.ext.ds.TestInMemDs;
+import org.apache.jackrabbit.aws.ext.ds.TestInMemDsCacheOff;
+import org.apache.jackrabbit.aws.ext.ds.TestS3Ds;
+import org.apache.jackrabbit.aws.ext.ds.TestS3DsCacheOff;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test suite that includes all test cases for this module.
+ */
+public class TestAll extends TestCase {
+
+    private static final Logger LOG = LoggerFactory.getLogger(TestAll.class);
+
+    /**
+     * TestAll suite that executes all tests inside this module. To run the
+     * test cases against Amazon S3, pass the AWS configuration properties file
+     * as a system property, e.g. -Dconfig=/opt/cq/aws.properties. A sample aws
+     * properties file is located at src/test/resources/aws.properties.
+     */
+    public static Test suite() {
+        TestSuite suite = new TestSuite("S3 tests");
+        suite.addTestSuite(TestLocalCache.class);
+        suite.addTestSuite(TestInMemDs.class);
+        suite.addTestSuite(TestInMemDsCacheOff.class);
+        String config = System.getProperty(TestCaseBase.CONFIG);
+        LOG.info("config= " + config);
+        if (config != null && !"".equals(config.trim())) {
+            suite.addTestSuite(TestS3Ds.class);
+            suite.addTestSuite(TestS3DsCacheOff.class);
+        }
+        return suite;
+    }
+}
Index: jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java
===================================================================
--- jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java	(revision 0)
+++ jackrabbit-aws-ext/src/test/java/org/apache/jackrabbit/aws/ext/TestLocalCache.java	(working copy)
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.aws.ext;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.jackrabbit.aws.ext.ds.TestCaseBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test cases for the local cache ({@link LocalCache}).
+ */
+public class TestLocalCache extends TestCase {
+
+    private static final String CACHE_DIR = "target/cache";
+
+    private static final String TEMP_DIR = "target/temp";
+
+    private static final Logger LOG = LoggerFactory.getLogger(TestLocalCache.class);
+
+    protected void setUp() {
+        try {
+            // FileUtils.deleteQuietly is a no-op if the directory is
+            // missing, so no exists() check is needed
+            File cachedir = new File(CACHE_DIR);
+            FileUtils.deleteQuietly(cachedir);
+            cachedir.mkdirs();
+
+            File tempdir = new File(TEMP_DIR);
+            FileUtils.deleteQuietly(tempdir);
+            tempdir.mkdirs();
+        } catch (Exception e) {
+            LOG.error("error in setUp:", e);
+            fail();
+        }
+    }
+
+    protected void tearDown() throws IOException {
+        FileUtils.deleteQuietly(new File(CACHE_DIR));
+        FileUtils.deleteQuietly(new File(TEMP_DIR));
+    }
+
+    /**
+     * Test that data stored in the cache can be retrieved again.
+     */
+    public void testStoreRetrieve() {
+        try {
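+            // LocalCache arguments: cache directory, temp directory, maximum
+            // size in bytes, purge trigger factor and purge resize factor
+            // (parameter roles inferred from the purge test below)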
+            LocalCache cache = new LocalCache(CACHE_DIR, TEMP_DIR, 400, 0.95,
+                0.70);
+            Random random = new Random(12345);
+            byte[] data = new byte[100];
+            Map<String, byte[]> byteMap = new HashMap<String, byte[]>();
+            random.nextBytes(data);
+            byteMap.put("a1", data);
+
+            data = new byte[100];
+            random.nextBytes(data);
+            byteMap.put("a2", data);
+
+            data = new byte[100];
+            random.nextBytes(data);
+            byteMap.put("a3", data);
+
+            cache.store("a1", new ByteArrayInputStream(byteMap.get("a1")));
+            cache.store("a2", new ByteArrayInputStream(byteMap.get("a2")));
+            cache.store("a3", new ByteArrayInputStream(byteMap.get("a3")));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a1")),
+                cache.getIfStored("a1"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a2")),
+                cache.getIfStored("a2"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a3")),
+                cache.getIfStored("a3"));
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail();
+        }
+
+    }
+
+    /**
+     * Test that the cache purges itself once its current size exceeds
+     * cachePurgeTrigFactor * maxSize.
+     */
+    public void testAutoPurge() {
+        try {
+
+            LocalCache cache = new LocalCache(CACHE_DIR, TEMP_DIR, 400, 0.95,
+                0.70);
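+            // With a maximum size of 400 bytes, a purge trigger factor of
+            // 0.95 and a resize factor of 0.70, purging should kick in once
+            // the cache holds more than 400 * 0.95 = 380 bytes and
+            // (presumably) shrink it to about 400 * 0.70 = 280 bytes.
+            // Storing a1..a3 (3 x 100 bytes) stays below the trigger; adding
+            // a4 (90 bytes) pushes the total to 390 bytes and evicts the
+            // oldest entries, as the assertions below expect.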
+            Random random = new Random(12345);
+            byte[] data = new byte[100];
+            Map<String, byte[]> byteMap = new HashMap<String, byte[]>();
+            random.nextBytes(data);
+            byteMap.put("a1", data);
+
+            data = new byte[100];
+            random.nextBytes(data);
+            byteMap.put("a2", data);
+
+            data = new byte[100];
+            random.nextBytes(data);
+            byteMap.put("a3", data);
+
+            cache.store("a1", new ByteArrayInputStream(byteMap.get("a1")));
+            cache.store("a2", new ByteArrayInputStream(byteMap.get("a2")));
+            cache.store("a3", new ByteArrayInputStream(byteMap.get("a3")));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a1")),
+                cache.getIfStored("a1"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a2")),
+                cache.getIfStored("a2"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a3")),
+                cache.getIfStored("a3"));
+
+            data = new byte[90];
+            random.nextBytes(data);
+            byteMap.put("a4", data);
+            // storing a4 should purge cache
+            cache.store("a4", new ByteArrayInputStream(byteMap.get("a4")));
+            Thread.sleep(1000);
+
+            assertNull("a1 should be null", cache.getIfStored("a1"));
+            assertNull("a2 should be null", cache.getIfStored("a2"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a3")),
+                cache.getIfStored("a3"));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a4")),
+                cache.getIfStored("a4"));
+            data = new byte[100];
+            random.nextBytes(data);
+            byteMap.put("a5", data);
+            cache.store("a5", new ByteArrayInputStream(byteMap.get("a5")));
+            assertEquals(new ByteArrayInputStream(byteMap.get("a3")),
+                cache.getIfStored("a3"));
+        } catch (Exception e) {
+            LOG.error("error:", e);
+            fail();
+        }
+    }
+
+    /**
+     * Asserts that two input streams contain exactly the same bytes.
+     */
+    protected void assertEquals(InputStream a, InputStream b)
+            throws IOException {
+        while (true) {
+            int ai = a.read();
+            int bi = b.read();
+            assertEquals(ai, bi);
+            if (ai < 0) {
+                break;
+            }
+        }
+    }
+
+}
Index: jackrabbit-aws-ext/src/test/resources/aws.properties
===================================================================
--- jackrabbit-aws-ext/src/test/resources/aws.properties	(revision 0)
+++ jackrabbit-aws-ext/src/test/resources/aws.properties	(working copy)
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# AWS access key ID
+accessKey=
+# AWS secret key
+secretKey=
+# S3 bucket name
+s3Bucket=
+# S3 bucket region
+# Mapping of S3 regions to their constants
+# US Standard us-standard
+# US West us-west-2
+# US West (Northern California) us-west-1
+# EU (Ireland) EU
+# Asia Pacific (Singapore) ap-southeast-1
+# Asia Pacific (Sydney) ap-southeast-2
+# Asia Pacific (Tokyo) ap-northeast-1
+# South America (Sao Paulo) sa-east-1
+s3Region=
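+# HTTP client settings (timeouts are in milliseconds); these map onto the
+# corresponding AWS SDK ClientConfiguration options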
+connectionTimeout=120000
+socketTimeout=120000
+maxConnections=10
+maxErrorRetry=10
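
For orientation only: the keys above map directly onto the AWS SDK's
BasicAWSCredentials and ClientConfiguration. The sketch below shows how a
backend could build an S3 client from such a file; S3ClientFactory is a
hypothetical helper written for this illustration, not a class from this
patch, and region selection via the s3Region key is left out for brevity.

    import java.io.FileInputStream;
    import java.util.Properties;

    import com.amazonaws.ClientConfiguration;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.services.s3.AmazonS3Client;

    public class S3ClientFactory {

        // Builds an AmazonS3Client from an aws.properties file like the
        // sample above.
        public static AmazonS3Client create(String configPath)
                throws Exception {
            Properties props = new Properties();
            FileInputStream in = new FileInputStream(configPath);
            try {
                props.load(in);
            } finally {
                in.close();
            }

            BasicAWSCredentials credentials = new BasicAWSCredentials(
                props.getProperty("accessKey"),
                props.getProperty("secretKey"));

            ClientConfiguration cc = new ClientConfiguration();
            cc.setConnectionTimeout(
                Integer.parseInt(props.getProperty("connectionTimeout")));
            cc.setSocketTimeout(
                Integer.parseInt(props.getProperty("socketTimeout")));
            cc.setMaxConnections(
                Integer.parseInt(props.getProperty("maxConnections")));
            cc.setMaxErrorRetry(
                Integer.parseInt(props.getProperty("maxErrorRetry")));

            return new AmazonS3Client(credentials, cc);
        }
    }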
Index: jackrabbit-aws-ext/src/test/resources/log4j.properties
===================================================================
--- jackrabbit-aws-ext/src/test/resources/log4j.properties	(revision 0)
+++ jackrabbit-aws-ext/src/test/resources/log4j.properties	(working copy)
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# log4j configuration for the jackrabbit-aws-ext test cases
+log4j.rootLogger=INFO, file
+
+#log4j.logger.org.apache.jackrabbit.test=DEBUG
+
+# 'file' is set to be a FileAppender.
+log4j.appender.file=org.apache.log4j.FileAppender
+log4j.appender.file.File=target/debug.log
+
+# 'file' uses PatternLayout.
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{dd.MM.yyyy HH:mm:ss} *%-5p* [%t] %c{1}: %m (%F, line %L)%n
Index: jackrabbit-aws-ext/src/test/resources/repository_sample.xml
===================================================================
--- jackrabbit-aws-ext/src/test/resources/repository_sample.xml	(revision 0)
+++ jackrabbit-aws-ext/src/test/resources/repository_sample.xml	(working copy)
@@ -0,0 +1,166 @@
+<!-- repository_sample.xml: sample Jackrabbit repository configuration for
+     the S3 data store. The XML element content of this file was stripped
+     from this copy of the patch and is not reproduced here. -->
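
For orientation, since the sample file's content was lost: plugging a custom
data store into a Jackrabbit repository.xml is normally done with a DataStore
element of roughly the following shape. The class name S3DataStore and the
"config" parameter pointing at aws.properties are assumptions made for
illustration, not confirmed by this patch.

    <!-- hypothetical: class and parameter names are assumptions -->
    <DataStore class="org.apache.jackrabbit.aws.ext.ds.S3DataStore">
        <!-- path to the aws.properties file -->
        <param name="config" value="${rep.home}/aws.properties"/>
    </DataStore>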
Index: pom.xml
===================================================================
--- pom.xml	(revision 1521073)
+++ pom.xml	(working copy)
@@ -54,6 +54,7 @@
     jackrabbit-spi2dav
     jackrabbit-jcr2dav
     jackrabbit-jcr-client
+    jackrabbit-aws-ext
     jackrabbit-bundle
     jackrabbit-standalone