diff --git a/oak-blob-cloud-azure/pom.xml b/oak-blob-cloud-azure/pom.xml
new file mode 100644
index 0000000000..520765ea2e
--- /dev/null
+++ b/oak-blob-cloud-azure/pom.xml
@@ -0,0 +1,174 @@
+
+
+
+
+
+
+ oak-parent
+ org.apache.jackrabbit
+ 1.6.2-SNAPSHOT
+ ../oak-parent/pom.xml
+
+ 4.0.0
+
+ oak-blob-cloud-azure
+ Oak Azure Cloud Blob Store
+ bundle
+
+
+
+
+ org.apache.felix
+ maven-scr-plugin
+
+
+ org.apache.felix
+ maven-bundle-plugin
+
+
+
+ org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage
+
+ sun.io
+
+ azure-storage,
+ azure-keyvault-core
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.osgi
+ org.osgi.core
+ provided
+
+
+ org.osgi
+ org.osgi.compendium
+ provided
+
+
+ biz.aQute.bnd
+ bndlib
+ provided
+
+
+ org.apache.felix
+ org.apache.felix.scr.annotations
+ provided
+
+
+
+
+ javax.jcr
+ jcr
+ 2.0
+
+
+ org.apache.jackrabbit
+ jackrabbit-jcr-commons
+ ${jackrabbit.version}
+
+
+ org.apache.jackrabbit
+ jackrabbit-data
+ ${jackrabbit.version}
+
+
+
+
+ org.apache.jackrabbit
+ oak-commons
+ ${project.version}
+
+
+
+ org.apache.jackrabbit
+ oak-blob
+ ${project.version}
+
+
+ org.apache.jackrabbit
+ oak-core
+ ${project.version}
+
+
+
+
+ com.microsoft.azure
+ azure-storage
+ 5.0.0
+
+
+ com.microsoft.azure
+ azure-keyvault-core
+ 0.9.7
+
+
+
+
+ org.apache.jackrabbit
+ jackrabbit-data
+ ${jackrabbit.version}
+ tests
+ test
+
+
+ org.apache.jackrabbit
+ oak-core
+ ${project.version}
+ tests
+ test
+
+
+ junit
+ junit
+ test
+
+
+ org.slf4j
+ jul-to-slf4j
+ test
+
+
+ ch.qos.logback
+ logback-classic
+ test
+
+
+ org.apache.sling
+ org.apache.sling.testing.osgi-mock
+ test
+
+
+ org.mockito
+ mockito-core
+ 1.10.19
+ test
+
+
+
+
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java
new file mode 100644
index 0000000000..6e4a325e04
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AbstractAzureDataStoreService.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreService;
+import org.osgi.framework.Constants;
+import org.osgi.framework.ServiceRegistration;
+import org.osgi.service.component.ComponentContext;
+
+import java.util.Dictionary;
+import java.util.Hashtable;
+import java.util.Map;
+import java.util.Properties;
+
+public abstract class AbstractAzureDataStoreService extends AbstractDataStoreService {
+ private static final String DESCRIPTION = "oak.datastore.description";
+
+ private ServiceRegistration delegateReg;
+
+ @Override
+ protected DataStore createDataStore(ComponentContext context, Map config) {
+ Properties properties = new Properties();
+ properties.putAll(config);
+
+ AzureDataStore dataStore = new AzureDataStore();
+ dataStore.setStatisticsProvider(getStatisticsProvider());
+ dataStore.setProperties(properties);
+
+ Dictionary props = new Hashtable();
+ props.put(Constants.SERVICE_PID, dataStore.getClass().getName());
+ props.put(DESCRIPTION, getDescription());
+
+ delegateReg = context.getBundleContext().registerService(new String[] {
+ AbstractSharedCachingDataStore.class.getName(),
+ AbstractSharedCachingDataStore.class.getName()
+ }, dataStore , props);
+
+ return dataStore;
+ }
+
+ protected void deactivate() throws DataStoreException {
+ if (delegateReg != null) {
+ delegateReg.unregister();
+ }
+ super.deactivate();
+ }
+
+ @Override
+ protected String[] getDescription() {
+ return new String[] {"type=AzureBlob"};
+ }
+}
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java
new file mode 100644
index 0000000000..c307792e6c
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureBlobStoreBackend.java
@@ -0,0 +1,789 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import com.google.common.base.Function;
+import com.google.common.base.Strings;
+import com.google.common.collect.AbstractIterator;
+import com.google.common.collect.Lists;
+import com.microsoft.azure.storage.RequestOptions;
+import com.microsoft.azure.storage.ResultContinuation;
+import com.microsoft.azure.storage.ResultSegment;
+import com.microsoft.azure.storage.RetryPolicy;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.BlobListingDetails;
+import com.microsoft.azure.storage.blob.BlobRequestOptions;
+import com.microsoft.azure.storage.blob.CloudBlob;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import com.microsoft.azure.storage.blob.CloudBlobDirectory;
+import com.microsoft.azure.storage.blob.CloudBlockBlob;
+import com.microsoft.azure.storage.blob.CopyStatus;
+import com.microsoft.azure.storage.blob.ListBlobItem;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.commons.PropertiesUtil;
+import org.apache.jackrabbit.oak.spi.blob.AbstractDataRecord;
+import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Queue;

import static java.lang.Thread.currentThread;
+
+public class AzureBlobStoreBackend extends AbstractSharedBackend {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AzureBlobStoreBackend.class);
+
+ private static final String META_DIR_NAME = "META";
+ private static final String META_KEY_PREFIX = META_DIR_NAME + "/";
+
+ private static final long BUFFERED_STREAM_THRESHHOLD = 1024 * 1024;
+
+ private Properties properties;
+ private String containerName;
+ private String connectionString;
+ private int concurrentRequestCount = 1;
+ private RetryPolicy retryPolicy;
+ private Integer requestTimeout;
+
+ private String secret;
+
+ public void setProperties(final Properties properties) {
+ this.properties = properties;
+ }
+
+ protected CloudBlobContainer getAzureContainer() throws DataStoreException {
+ CloudBlobContainer container = Utils.getBlobContainer(connectionString, containerName);
+ RequestOptions requestOptions = container.getServiceClient().getDefaultRequestOptions();
+ if (retryPolicy != null) {
+ requestOptions.setRetryPolicyFactory(retryPolicy);
+ }
+ if (requestTimeout != null) {
+ requestOptions.setTimeoutIntervalInMs(requestTimeout);
+ }
+ return container;
+ }
+
+ @Override
+ public void init() throws DataStoreException {
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ long start = System.currentTimeMillis();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+ LOG.debug("Started backend initialization");
+
+ if (null == properties) {
+ try {
+ properties = Utils.readConfig(Utils.DEFAULT_CONFIG_FILE);
+ }
+ catch (IOException e) {
+ throw new DataStoreException("Unable to initialize Azure Data Store from " + Utils.DEFAULT_CONFIG_FILE, e);
+ }
+ }
+ secret = properties.getProperty("secret");
+
+ try {
+ Utils.setProxyIfNeeded(properties);
+ containerName = (String) properties.get(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
+ connectionString = Utils.getConnectionStringFromProperties(properties);
+ concurrentRequestCount = PropertiesUtil.toInteger(properties.get(AzureConstants.AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION), 1);
+ LOG.info("Using concurrentRequestsPerOperation={}", concurrentRequestCount);
+ retryPolicy = Utils.getRetryPolicy((String)properties.get(AzureConstants.AZURE_BLOB_MAX_REQUEST_RETRY));
+ if (properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT) != null) {
+ requestTimeout = PropertiesUtil.toInteger(properties.getProperty(AzureConstants.AZURE_BLOB_REQUEST_TIMEOUT), RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT);
+ }
+
+ CloudBlobContainer azureContainer = getAzureContainer();
+
+ if (azureContainer.createIfNotExists()) {
+ LOG.info("New container created. containerName={}", containerName);
+ } else {
+ LOG.info("Reusing existing container. containerName={}", containerName);
+ }
+ LOG.debug("Backend initialized. duration={}",
+ +(System.currentTimeMillis() - start));
+ }
+ catch (StorageException e) {
+ throw new DataStoreException(e);
+ }
+ }
+ finally {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+
+ @Override
+ public InputStream read(DataIdentifier identifier) throws DataStoreException {
+ if (null == identifier) throw new NullPointerException("identifier");
+
+ String key = getKeyName(identifier);
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(
+ getClass().getClassLoader());
+ CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+ if (!blob.exists()) {
+ throw new DataStoreException(String.format("Trying to read missing blob. identifier=%s", key));
+ }
+
+ InputStream is = blob.openInputStream();
+ LOG.debug("Got input stream for blob. identifier={} duration={}", key, (System.currentTimeMillis() - start));
+ return is;
+ }
+ catch (StorageException e) {
+ LOG.info("Error reading blob. identifier=%s", key);
+ throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e);
+ }
+ catch (URISyntaxException e) {
+ LOG.debug("Error reading blob. identifier=%s", key);
+ throw new DataStoreException(String.format("Cannot read blob. identifier=%s", key), e);
+ } finally {
+ if (contextClassLoader != null) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public void write(DataIdentifier identifier, File file) throws DataStoreException {
+ if (null == identifier) {
+ throw new NullPointerException("identifier");
+ }
+ if (null == file) {
+ throw new NullPointerException("file");
+ }
+ String key = getKeyName(identifier);
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ long len = file.length();
+ LOG.debug("Blob write started. identifier={} length={}", key, len);
+ CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+ if (!blob.exists()) {
+ BlobRequestOptions options = new BlobRequestOptions();
+ options.setConcurrentRequestCount(concurrentRequestCount);
+ boolean useBufferedStream = len < BUFFERED_STREAM_THRESHHOLD;
+ final InputStream in = useBufferedStream ? new BufferedInputStream(new FileInputStream(file)) : new FileInputStream(file);
+ try {
+ blob.upload(in, len, null, options, null);
+ LOG.debug("Blob created. identifier={} length={} duration={} buffered={}", key, len, (System.currentTimeMillis() - start), useBufferedStream);
+ } finally {
+ in.close();
+ }
+ return;
+ }
+
+ blob.downloadAttributes();
+ if (blob.getProperties().getLength() != len) {
+ throw new DataStoreException("Length Collision. identifier=" + key +
+ " new length=" + len +
+ " old length=" + blob.getProperties().getLength());
+ }
+ LOG.trace("Blob already exists. identifier={} lastModified={}", key, blob.getProperties().getLastModified().getTime());
+ blob.startCopy(blob);
+ //TODO: better way of updating lastModified (use custom metadata?)
+ if (!waitForCopy(blob)) {
+ throw new DataStoreException(
+ String.format("Cannot update lastModified for blob. identifier=%s status=%s",
+ key, blob.getCopyState().getStatusDescription()));
+ }
+ LOG.debug("Blob updated. identifier={} lastModified={} duration={}", key,
+ blob.getProperties().getLastModified().getTime(), (System.currentTimeMillis() - start));
+ }
+ catch (StorageException e) {
+ LOG.info("Error writing blob. identifier={}", key, e);
+ throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e);
+ }
+ catch (URISyntaxException | IOException e) {
+ LOG.debug("Error writing blob. identifier={}", key, e);
+ throw new DataStoreException(String.format("Cannot write blob. identifier=%s", key), e);
+ } catch (InterruptedException e) {
+ LOG.debug("Error writing blob. identifier={}", key, e);
+ throw new DataStoreException(String.format("Cannot copy blob. identifier=%s", key), e);
+ } finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ private static boolean waitForCopy(CloudBlob blob) throws StorageException, InterruptedException {
+ boolean continueLoop = true;
+ CopyStatus status = CopyStatus.PENDING;
+ while (continueLoop) {
+ blob.downloadAttributes();
+ status = blob.getCopyState().getStatus();
+ continueLoop = status == CopyStatus.PENDING;
+ // Sleep if retry is needed
+ if (continueLoop) {
+ Thread.sleep(500);
+ }
+ }
+ return status == CopyStatus.SUCCESS;
+ }
+
+ @Override
+ public byte[] getOrCreateReferenceKey() throws DataStoreException {
+ try {
+ if (!Strings.isNullOrEmpty(secret)) {
+ return secret.getBytes("UTF-8");
+ }
+ LOG.warn("secret not defined");
+ return super.getOrCreateReferenceKey();
+ } catch (UnsupportedEncodingException e) {
+ throw new DataStoreException(e);
+ }
+ }
+
+ @Override
+ public DataRecord getRecord(DataIdentifier identifier) throws DataStoreException {
+ if (null == identifier) {
+ throw new NullPointerException("identifier");
+ }
+ String key = getKeyName(identifier);
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(key);
+ if (blob.exists()) {
+ blob.downloadAttributes();
+ AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord(
+ this,
+ connectionString,
+ containerName,
+ new DataIdentifier(getIdentifierName(blob.getName())),
+ blob.getProperties().getLastModified().getTime(),
+ blob.getProperties().getLength());
+ LOG.debug("Data record read for blob. identifier={} duration={} record={}",
+ key, (System.currentTimeMillis() - start), record);
+ return record;
+ } else {
+ LOG.debug("Blob not found. identifier={} duration={}",
+ key, (System.currentTimeMillis() - start));
+ throw new DataStoreException(String.format("Cannot find blob. identifier=%s", key));
+ }
+ }catch (StorageException e) {
+ LOG.info("Error getting data record for blob. identifier={}", key, e);
+ throw new DataStoreException(String.format("Cannot retrieve blob. identifier=%s", key), e);
+ }
+ catch (URISyntaxException e) {
+ LOG.debug("Error getting data record for blob. identifier={}", key, e);
+ throw new DataStoreException(String.format("Cannot retrieve blob. identifier=%s", key), e);
+ } finally {
+ if (contextClassLoader != null) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public Iterator getAllIdentifiers() throws DataStoreException {
+ return new RecordsIterator(
+ new Function() {
+ @Override
+ public DataIdentifier apply(AzureBlobInfo input) {
+ return new DataIdentifier(getIdentifierName(input.getName()));
+ }
+ }
+ );
+ }
+
+
+
+ @Override
+ public Iterator getAllRecords() throws DataStoreException {
+ final AbstractSharedBackend backend = this;
+ return new RecordsIterator(
+ new Function() {
+ @Override
+ public DataRecord apply(AzureBlobInfo input) {
+ return new AzureBlobStoreDataRecord(
+ backend,
+ connectionString,
+ containerName,
+ new DataIdentifier(getIdentifierName(input.getName())),
+ input.getLastModified(),
+ input.getLength());
+ }
+ }
+ );
+ }
+
+ @Override
+ public boolean exists(DataIdentifier identifier) throws DataStoreException {
+ long start = System.currentTimeMillis();
+ String key = getKeyName(identifier);
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ boolean exists =getAzureContainer().getBlockBlobReference(key).exists();
+ LOG.debug("Blob exists={} identifier={} duration={}", exists, key, (System.currentTimeMillis() - start));
+ return exists;
+ }
+ catch (Exception e) {
+ throw new DataStoreException(e);
+ }
+ finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public void close() throws DataStoreException {
+ LOG.info("AzureBlobBackend closed.");
+ }
+
+ @Override
+ public void deleteRecord(DataIdentifier identifier) throws DataStoreException {
+ if (null == identifier) throw new NullPointerException("identifier");
+
+ String key = getKeyName(identifier);
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ boolean result = getAzureContainer().getBlockBlobReference(key).deleteIfExists();
+ LOG.debug("Blob {}. identifier={} duration={}",
+ result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)",
+ key, (System.currentTimeMillis() - start));
+ }
+ catch (StorageException e) {
+ LOG.info("Error deleting blob. identifier={}", key, e);
+ throw new DataStoreException(e);
+ }
+ catch (URISyntaxException e) {
+ throw new DataStoreException(e);
+ } finally {
+ if (contextClassLoader != null) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public void addMetadataRecord(InputStream input, String name) throws DataStoreException {
+ if (null == input) {
+ throw new NullPointerException("input");
+ }
+ if (Strings.isNullOrEmpty(name)) {
+ throw new IllegalArgumentException("name");
+ }
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ addMetadataRecordImpl(input, name, -1L);
+ LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start));
+ }
+ finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public void addMetadataRecord(File input, String name) throws DataStoreException {
+ if (null == input) {
+ throw new NullPointerException("input");
+ }
+ if (Strings.isNullOrEmpty(name)) {
+ throw new IllegalArgumentException("name");
+ }
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ addMetadataRecordImpl(new FileInputStream(input), name, input.length());
+ LOG.debug("Metadata record added. metadataName={} duration={}", name, (System.currentTimeMillis() - start));
+ }
+ catch (FileNotFoundException e) {
+ throw new DataStoreException(e);
+ }
+ finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ private void addMetadataRecordImpl(final InputStream input, String name, long recordLength) throws DataStoreException {
+ try {
+ CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
+ CloudBlockBlob blob = metaDir.getBlockBlobReference(name);
+ blob.upload(input, recordLength);
+ }
+ catch (StorageException e) {
+ LOG.info("Error adding metadata record. metadataName={} length={}", name, recordLength, e);
+ throw new DataStoreException(e);
+ }
+ catch (URISyntaxException | IOException e) {
+ throw new DataStoreException(e);
+ }
+ }
+
+ @Override
+ public DataRecord getMetadataRecord(String name) {
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ long start = System.currentTimeMillis();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
+ CloudBlockBlob blob = metaDir.getBlockBlobReference(name);
+ if (!blob.exists()) {
+ LOG.warn("Trying to read missing metadata. metadataName={}", name);
+ return null;
+ }
+ blob.downloadAttributes();
+ long lastModified = blob.getProperties().getLastModified().getTime();
+ long length = blob.getProperties().getLength();
+ AzureBlobStoreDataRecord record = new AzureBlobStoreDataRecord(this,
+ connectionString,
+ containerName, new DataIdentifier(name),
+ lastModified,
+ length,
+ true);
+ LOG.debug("Metadata record read. metadataName={} duration={} record={}", name, (System.currentTimeMillis() - start), record);
+ return record;
+
+ } catch (StorageException e) {
+ LOG.info("Error reading metadata record. metadataName={}", name, e);
+ throw new RuntimeException(e);
+ } catch (Exception e) {
+ LOG.debug("Error reading metadata record. metadataName={}", name, e);
+ throw new RuntimeException(e);
+ } finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+ @Override
+ public List getAllMetadataRecords(String prefix) {
+ if (null == prefix) {
+ throw new NullPointerException("prefix");
+ }
+ long start = System.currentTimeMillis();
+ final List records = Lists.newArrayList();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
+ for (ListBlobItem item : metaDir.listBlobs(prefix)) {
+ if (item instanceof CloudBlob) {
+ CloudBlob blob = (CloudBlob) item;
+ records.add(new AzureBlobStoreDataRecord(
+ this,
+ connectionString,
+ containerName,
+ new DataIdentifier(stripMetaKeyPrefix(blob.getName())),
+ blob.getProperties().getLastModified().getTime(),
+ blob.getProperties().getLength(),
+ true));
+ }
+ }
+ LOG.debug("Metadata records read. recordsRead={} metadataFolder={} duration={}", records.size(), prefix, (System.currentTimeMillis() - start));
+ }
+ catch (StorageException e) {
+ LOG.info("Error reading all metadata records. metadataFolder={}", prefix, e);
+ }
+ catch (DataStoreException | URISyntaxException e) {
+ LOG.debug("Error reading all metadata records. metadataFolder={}", prefix, e);
+ }
+ finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ return records;
+ }
+
+ @Override
+ public boolean deleteMetadataRecord(String name) {
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlockBlob blob = getAzureContainer().getBlockBlobReference(addMetaKeyPrefix(name));
+ boolean result = blob.deleteIfExists();
+ LOG.debug("Metadata record {}. metadataName={} duration={}",
+ result ? "deleted" : "delete requested, but it does not exist (perhaps already deleted)",
+ name, (System.currentTimeMillis() - start));
+ return result;
+
+ }
+ catch (StorageException e) {
+ LOG.info("Error deleting metadata record. metadataName={}", name, e);
+ }
+ catch (DataStoreException | URISyntaxException e) {
+ LOG.debug("Error deleting metadata record. metadataName={}", name, e);
+ }
+ finally {
+ if (contextClassLoader != null) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public void deleteAllMetadataRecords(String prefix) {
+ if (null == prefix) {
+ throw new NullPointerException("prefix");
+ }
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
+ try {
+ Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlobDirectory metaDir = getAzureContainer().getDirectoryReference(META_DIR_NAME);
+ int total = 0;
+ for (ListBlobItem item : metaDir.listBlobs(prefix)) {
+ if (item instanceof CloudBlob) {
+ if (((CloudBlob)item).deleteIfExists()) {
+ total++;
+ }
+ }
+ }
+ LOG.debug("Metadata records deleted. recordsDeleted={} metadataFolder={} duration={}",
+ total, prefix, (System.currentTimeMillis() - start));
+
+ }
+ catch (StorageException e) {
+ LOG.info("Error deleting all metadata records. metadataFolder={}", prefix, e);
+ }
+ catch (DataStoreException | URISyntaxException e) {
+ LOG.debug("Error deleting all metadata records. metadataFolder={}", prefix, e);
+ }
+ finally {
+ if (null != contextClassLoader) {
+ Thread.currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ }
+
+
+ /**
+ * Get key from data identifier. Object is stored with key in ADS.
+ */
+ private static String getKeyName(DataIdentifier identifier) {
+ String key = identifier.toString();
+ return key.substring(0, 4) + Utils.DASH + key.substring(4);
+ }
+
+ /**
+ * Get data identifier from key.
+ */
+ private static String getIdentifierName(String key) {
+ if (!key.contains(Utils.DASH)) {
+ return null;
+ } else if (key.contains(META_KEY_PREFIX)) {
+ return key;
+ }
+ return key.substring(0, 4) + key.substring(5);
+ }
+
+ private static String addMetaKeyPrefix(final String key) {
+ return META_KEY_PREFIX + key;
+ }
+
+ private static String stripMetaKeyPrefix(String name) {
+ if (name.startsWith(META_KEY_PREFIX)) {
+ return name.substring(META_KEY_PREFIX.length());
+ }
+ return name;
+ }
+
+ private static class AzureBlobInfo {
+ private final String name;
+ private final long lastModified;
+ private final long length;
+
+ public AzureBlobInfo(String name, long lastModified, long length) {
+ this.name = name;
+ this.lastModified = lastModified;
+ this.length = length;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public long getLastModified() {
+ return lastModified;
+ }
+
+ public long getLength() {
+ return length;
+ }
+
+ public static AzureBlobInfo fromCloudBlob(CloudBlob cloudBlob) {
+ return new AzureBlobInfo(cloudBlob.getName(),
+ cloudBlob.getProperties().getLastModified().getTime(),
+ cloudBlob.getProperties().getLength());
+ }
+ }
+
+ private class RecordsIterator extends AbstractIterator {
+ // Seems to be thread-safe (in 5.0.0)
+ ResultContinuation resultContinuation;
+ boolean firstCall = true;
+ final Function transformer;
+ final Queue items = Lists.newLinkedList();
+
+ public RecordsIterator (Function transformer) {
+ this.transformer = transformer;
+ }
+
+ @Override
+ protected T computeNext() {
+ if (items.isEmpty()) {
+ loadItems();
+ }
+ if (!items.isEmpty()) {
+ return transformer.apply(items.remove());
+ }
+ return endOfData();
+ }
+
+ private boolean loadItems() {
+ long start = System.currentTimeMillis();
+ ClassLoader contextClassLoader = currentThread().getContextClassLoader();
+ try {
+ currentThread().setContextClassLoader(getClass().getClassLoader());
+
+ CloudBlobContainer container = Utils.getBlobContainer(connectionString, containerName);
+ if (!firstCall && (resultContinuation == null || !resultContinuation.hasContinuation())) {
+ LOG.trace("No more records in container. containerName={}", container);
+ return false;
+ }
+ firstCall = false;
+ ResultSegment results = container.listBlobsSegmented(null, false, EnumSet.noneOf(BlobListingDetails.class), null, resultContinuation, null, null);
+ resultContinuation = results.getContinuationToken();
+ for (ListBlobItem item : results.getResults()) {
+ if (item instanceof CloudBlob) {
+ items.add(AzureBlobInfo.fromCloudBlob((CloudBlob)item));
+ }
+ }
+ LOG.debug("Container records batch read. batchSize={} containerName={} duration={}",
+ results.getLength(), containerName, (System.currentTimeMillis() - start));
+ return results.getLength() > 0;
+ }
+ catch (StorageException e) {
+ LOG.info("Error listing blobs. containerName={}", containerName, e);
+ }
+ catch (DataStoreException e) {
+ LOG.debug("Cannot list blobs. containerName={}", containerName, e);
+ } finally {
+ if (contextClassLoader != null) {
+ currentThread().setContextClassLoader(contextClassLoader);
+ }
+ }
+ return false;
+ }
+ }
+
+ static class AzureBlobStoreDataRecord extends AbstractDataRecord {
+ final String connectionString;
+ final String containerName;
+ final long lastModified;
+ final long length;
+ final boolean isMeta;
+
+ public AzureBlobStoreDataRecord(AbstractSharedBackend backend, String connectionString, String containerName,
+ DataIdentifier key, long lastModified, long length) {
+ this(backend, connectionString, containerName, key, lastModified, length, false);
+ }
+
+ public AzureBlobStoreDataRecord(AbstractSharedBackend backend, String connectionString, String containerName,
+ DataIdentifier key, long lastModified, long length, boolean isMeta) {
+ super(backend, key);
+ this.connectionString = connectionString;
+ this.containerName = containerName;
+ this.lastModified = lastModified;
+ this.length = length;
+ this.isMeta = isMeta;
+ }
+
+ @Override
+ public long getLength() throws DataStoreException {
+ return length;
+ }
+
+ @Override
+ public InputStream getStream() throws DataStoreException {
+ String id = getKeyName(getIdentifier());
+ CloudBlobContainer container = Utils.getBlobContainer(connectionString, containerName);
+ if (isMeta) {
+ id = addMetaKeyPrefix(getIdentifier().toString());
+ }
+ try {
+ return container.getBlockBlobReference(id).openInputStream();
+ } catch (StorageException | URISyntaxException e) {
+ throw new DataStoreException(e);
+ }
+ }
+
+ @Override
+ public long getLastModified() {
+ return lastModified;
+ }
+
+ @Override
+ public String toString() {
+ return "AzureBlobStoreDataRecord{" +
+ "identifier=" + getIdentifier() +
+ ", length=" + length +
+ ", lastModified=" + lastModified +
+ ", containerName='" + containerName + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureConstants.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureConstants.java
new file mode 100644
index 0000000000..09ac541162
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureConstants.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+public final class AzureConstants {
+ /**
+ * Azure Storage Account name.
+ * NOTE(review): the property key is "accessKey" (S3-style naming);
+ * presumably kept for configuration compatibility — confirm before changing.
+ */
+ public static final String AZURE_STORAGE_ACCOUNT_NAME = "accessKey";
+
+ /**
+ * Azure Storage Account Key.
+ */
+ public static final String AZURE_STORAGE_ACCOUNT_KEY = "secretKey";
+
+ /**
+ * Azure Blob Storage container name
+ */
+ public static final String AZURE_BLOB_CONTAINER_NAME = "container";
+
+ /**
+ * Azure Blob Storage request timeout.
+ */
+ public static final String AZURE_BLOB_REQUEST_TIMEOUT = "socketTimeout";
+
+ /**
+ * Azure Blob Storage maximum retries per request.
+ */
+ public static final String AZURE_BLOB_MAX_REQUEST_RETRY = "maxErrorRetry";
+
+ /**
+ * Azure Blob Storage maximum connections per operation (default 1)
+ */
+ public static final String AZURE_BLOB_CONCURRENT_REQUESTS_PER_OPERATION = "maxConnections";
+
+ /**
+ * Constant to set proxy host.
+ */
+ public static final String PROXY_HOST = "proxyHost";
+
+ /**
+ * Constant to set proxy port.
+ */
+ public static final String PROXY_PORT = "proxyPort";
+
+ // Constants-only holder; not instantiable.
+ private AzureConstants() { }
+}
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java
new file mode 100644
index 0000000000..e1b7223235
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStore.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore;
+import org.apache.jackrabbit.oak.spi.blob.AbstractSharedBackend;
+import org.apache.jackrabbit.oak.spi.blob.SharedBackend;
+
+import java.util.Properties;
+
+/**
+ * Azure Blob Storage implementation of {@link AbstractSharedCachingDataStore}.
+ * Configuration is supplied via {@link #setProperties(Properties)} before init.
+ */
+public class AzureDataStore extends AbstractSharedCachingDataStore {
+
+ // Minimum record length in bytes (default 16 KB). Exact semantics are
+ // defined by the DataStore contract — NOTE(review): confirm against
+ // AbstractSharedCachingDataStore before relying on this in docs.
+ private int minRecordLength = 16*1024;
+
+ // Backend configuration pushed to AzureBlobStoreBackend in createBackend();
+ // if null, no properties are pushed to the backend.
+ protected Properties properties;
+
+ // Creates the Azure backend, propagating any configured properties.
+ @Override
+ protected AbstractSharedBackend createBackend() {
+ AzureBlobStoreBackend backend = new AzureBlobStoreBackend();
+ if (null != properties) {
+ backend.setProperties(properties);
+ }
+ return backend;
+ }
+
+ // Must be called before init() for the properties to reach the backend.
+ public void setProperties(final Properties properties) {
+ this.properties = properties;
+ }
+
+ public SharedBackend getBackend() {
+ return backend;
+ }
+
+ @Override
+ public int getMinRecordLength() {
+ return minRecordLength;
+ }
+
+ public void setMinRecordLength(int minRecordLength) {
+ this.minRecordLength = minRecordLength;
+ }
+}
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java
new file mode 100644
index 0000000000..cf9ed00ee8
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreService.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.ConfigurationPolicy;
+
+/**
+ * OSGi component registering the Azure data store service. Requires explicit
+ * configuration (ConfigurationPolicy.REQUIRE). The service PID lives in the
+ * {@code org.apache.jackrabbit.oak.plugins.blob.datastore} namespace —
+ * NOTE(review): presumably for parity with the other datastore PIDs; confirm.
+ */
+@Component(policy = ConfigurationPolicy.REQUIRE, name = AzureDataStoreService.NAME, metatype = true)
+public class AzureDataStoreService extends AbstractAzureDataStoreService {
+ public static final String NAME = "org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore";
+}
diff --git a/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java
new file mode 100644
index 0000000000..d4305973fa
--- /dev/null
+++ b/oak-blob-cloud-azure/src/main/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/Utils.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import com.google.common.base.Strings;
+import com.microsoft.azure.storage.CloudStorageAccount;
+import com.microsoft.azure.storage.OperationContext;
+import com.microsoft.azure.storage.RetryExponentialRetry;
+import com.microsoft.azure.storage.RetryNoRetry;
+import com.microsoft.azure.storage.RetryPolicy;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.commons.PropertiesUtil;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetSocketAddress;
+import java.net.Proxy;
+import java.net.SocketAddress;
+import java.net.URISyntaxException;
+import java.security.InvalidKeyException;
+import java.util.Properties;
+
+public final class Utils {
+
+ public static final String DEFAULT_CONFIG_FILE = "azure.properties";
+
+ public static final String DASH = "-";
+
+ /**
+ * Private constructor so that the class cannot be initialized from outside.
+ */
+ private Utils() {
+ }
+
+ /**
+ * Create a CloudBlobClient from a connection string.
+ *
+ * @param connectionString connection string to configure {@link CloudBlobClient}
+ * @return {@link CloudBlobClient}
+ * @throws URISyntaxException if the connection string contains a malformed URI
+ * @throws InvalidKeyException if the account key cannot be parsed
+ */
+ public static CloudBlobClient getBlobClient(final String connectionString) throws URISyntaxException, InvalidKeyException {
+ return CloudStorageAccount.parse(connectionString).createCloudBlobClient();
+ }
+
+ /**
+ * Obtain a reference to the named blob container.
+ *
+ * @param connectionString Azure storage connection string
+ * @param containerName name of the container
+ * @return container reference
+ * @throws DataStoreException wrapping any key/URI/storage failure
+ */
+ public static CloudBlobContainer getBlobContainer(final String connectionString, final String containerName) throws DataStoreException {
+ try {
+ CloudBlobClient client = getBlobClient(connectionString);
+ return client.getContainerReference(containerName);
+ } catch (InvalidKeyException | URISyntaxException | StorageException e) {
+ throw new DataStoreException(e);
+ }
+ }
+
+ /**
+ * Install a JVM-wide default HTTP proxy for Azure storage operations when
+ * both proxy host and proxy port are configured; otherwise do nothing.
+ *
+ * @param properties configuration holding {@link AzureConstants#PROXY_HOST}
+ * and {@link AzureConstants#PROXY_PORT}
+ */
+ public static void setProxyIfNeeded(final Properties properties) {
+ String proxyHost = properties.getProperty(AzureConstants.PROXY_HOST);
+ String proxyPort = properties.getProperty(AzureConstants.PROXY_PORT);
+
+ // BUG FIX: the port check was previously not negated
+ // (isNullOrEmpty(proxyPort)), so a fully-configured proxy was never
+ // installed, and a missing port would have hit Integer.parseInt(null).
+ if (!Strings.isNullOrEmpty(proxyHost) &&
+ !Strings.isNullOrEmpty(proxyPort)) {
+ int port = Integer.parseInt(proxyPort);
+ SocketAddress proxyAddr = new InetSocketAddress(proxyHost, port);
+ Proxy proxy = new Proxy(Proxy.Type.HTTP, proxyAddr);
+ OperationContext.setDefaultProxy(proxy);
+ }
+ }
+
+ /**
+ * Map a configured retry count to an Azure SDK retry policy:
+ * negative/unparseable = null (SDK default), 0 = no retries,
+ * positive = exponential backoff with that many attempts.
+ *
+ * @param maxRequestRetry configured retry count as a string
+ * @return retry policy, or null to use the SDK default
+ */
+ public static RetryPolicy getRetryPolicy(final String maxRequestRetry) {
+ int retries = PropertiesUtil.toInteger(maxRequestRetry, -1);
+ if (retries < 0) {
+ return null;
+ }
+ if (retries == 0) {
+ return new RetryNoRetry();
+ }
+ return new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, retries);
+ }
+
+ /**
+ * Build a connection string from the account name/key in the given
+ * properties; missing values default to the empty string.
+ */
+ public static String getConnectionStringFromProperties(Properties properties) {
+ return getConnectionString(
+ properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME, ""),
+ properties.getProperty(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY, ""));
+ }
+
+ /**
+ * Build an HTTPS Azure storage connection string for the given credentials.
+ */
+ public static String getConnectionString(final String accountName, final String accountKey) {
+ return String.format(
+ "DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s",
+ accountName,
+ accountKey
+ );
+ }
+
+ /**
+ * Read a configuration properties file. (The previous javadoc claimed a
+ * ";burn" suffix deletes the file after reading — the code never did that,
+ * so the stale claim has been removed.)
+ *
+ * @param fileName the properties file name
+ * @return the properties
+ * @throws java.io.IOException if the file doesn't exist or cannot be read
+ */
+ public static Properties readConfig(String fileName) throws IOException {
+ if (!new File(fileName).exists()) {
+ throw new IOException("Config file not found. fileName=" + fileName);
+ }
+ Properties prop = new Properties();
+ // try-with-resources replaces the manual try/finally close.
+ try (InputStream in = new FileInputStream(fileName)) {
+ prop.load(in);
+ }
+ return prop;
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java
new file mode 100644
index 0000000000..3fb9130cbc
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreTest.java
@@ -0,0 +1,691 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import static org.apache.commons.codec.binary.Hex.encodeHexString;
+import static org.apache.commons.io.FileUtils.copyInputStreamToFile;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.microsoft.azure.storage.StorageException;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.NullOutputStream;
+import org.apache.jackrabbit.core.data.DataIdentifier;
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.spi.blob.SharedBackend;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.net.URISyntaxException;
+import java.security.DigestOutputStream;
+import java.security.InvalidKeyException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * Test {@link AzureDataStore} with the Azure backend and the local cache on.
+ * It requires an Azure config file passed via the 'azure.config' system property,
+ * or individual settings passed as system properties prefixed with 'ds.'.
+ * See details @ {@link AzureDataStoreUtils}.
+ * For example: -Dazure.config=/opt/cq/azure.properties. A sample azure.properties is located at
+ * src/test/resources/azure.properties
+ */
+public class AzureDataStoreTest {
+ // Per-test scratch directory rooted under target/ so builds stay clean.
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
+ private Properties props;
+ private static byte[] testBuffer = "test".getBytes();
+ private AzureDataStore ds;
+ private AzureBlobStoreBackend backend;
+ // Randomly-named container, created per test and deleted in teardown.
+ private String container;
+ Random randomGen = new Random();
+
+ // Skip the whole class unless Azure credentials are configured.
+ @BeforeClass
+ public static void assumptions() {
+ assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+ }
+
+ @Before
+ public void setup() throws IOException, RepositoryException, URISyntaxException, InvalidKeyException, StorageException {
+
+ props = AzureDataStoreUtils.getAzureConfig();
+ // Random container name to isolate concurrent/repeated test runs.
+ container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999))
+ + "-test";
+ props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container);
+
+ ds = new AzureDataStore();
+ ds.setProperties(props);
+ ds.setCacheSize(0); // Turn caching off so we don't get weird test results due to caching
+ ds.init(folder.newFolder().getAbsolutePath());
+ backend = (AzureBlobStoreBackend) ds.getBackend();
+ }
+
+ // Best-effort cleanup; container deletion failures are deliberately ignored.
+ @After
+ public void teardown() throws InvalidKeyException, URISyntaxException, StorageException {
+ ds = null;
+ try {
+ AzureDataStoreUtils.deleteContainer(container);
+ } catch (Exception ignore) {}
+ }
+
+ // Validates a record against another record's identifier/length/timestamp.
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataRecord rhs)
+ throws DataStoreException, IOException {
+ validateRecord(record, contents, rhs.getIdentifier(), rhs.getLength(), rhs.getLastModified());
+ }
+
+ // Validates with an exact last-modified match.
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataIdentifier identifier,
+ final long length,
+ final long lastModified)
+ throws DataStoreException, IOException {
+ validateRecord(record, contents, identifier, length, lastModified, true);
+ }
+
+ // Core check: length, last-modified (exact or strictly-newer), identifier,
+ // and full content equality.
+ // NOTE(review): assertEquals is called as (actual, expected) — reversed
+ // from the JUnit convention; failure messages will read backwards.
+ private void validateRecord(final DataRecord record,
+ final String contents,
+ final DataIdentifier identifier,
+ final long length,
+ final long lastModified,
+ final boolean lastModifiedEquals)
+ throws DataStoreException, IOException {
+ assertEquals(record.getLength(), length);
+ if (lastModifiedEquals) {
+ assertEquals(record.getLastModified(), lastModified);
+ } else {
+ assertTrue(record.getLastModified() > lastModified);
+ }
+ assertTrue(record.getIdentifier().toString().equals(identifier.toString()));
+ StringWriter writer = new StringWriter();
+ org.apache.commons.io.IOUtils.copy(record.getStream(), writer, "utf-8");
+ assertTrue(writer.toString().equals(contents));
+ }
+
+ private static InputStream randomStream(int seed, int size) {
+ Random r = new Random(seed);
+ byte[] data = new byte[size];
+ r.nextBytes(data);
+ return new ByteArrayInputStream(data);
+ }
+
+ // Computes the hex-encoded SHA-1 digest of the stream's contents — the same
+ // identifier scheme content-addressed records use. Closes the stream.
+ private static String getIdForInputStream(final InputStream in)
+ throws NoSuchAlgorithmException, IOException {
+ MessageDigest digest = MessageDigest.getInstance("SHA-1");
+ OutputStream output = new DigestOutputStream(new NullOutputStream(), digest);
+ try {
+ IOUtils.copyLarge(in, output);
+ } finally {
+ IOUtils.closeQuietly(output);
+ IOUtils.closeQuietly(in);
+ }
+ return encodeHexString(digest.digest());
+ }
+
+
+ // Round-trip: add a record, verify it exists with the right metadata and
+ // content, delete it, verify it is gone.
+ @Test
+ public void testCreateAndDeleteBlobHappyPath() throws DataStoreException, IOException {
+ final DataRecord uploadedRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier = uploadedRecord.getIdentifier();
+ assertTrue(backend.exists(identifier));
+ assertTrue(0 != uploadedRecord.getLastModified());
+ assertEquals(testBuffer.length, uploadedRecord.getLength());
+
+ final DataRecord retrievedRecord = ds.getRecord(identifier);
+ validateRecord(retrievedRecord, new String(testBuffer), uploadedRecord);
+
+ ds.deleteRecord(identifier);
+ assertFalse(backend.exists(uploadedRecord.getIdentifier()));
+ }
+
+
+ // Re-uploading identical content must yield the same identifier
+ // (content-addressed storage) and leave the record readable.
+ @Test
+ public void testCreateAndReUploadBlob() throws DataStoreException, IOException {
+ final DataRecord createdRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier1 = createdRecord.getIdentifier();
+ assertTrue(backend.exists(identifier1));
+
+ final DataRecord record1 = ds.getRecord(identifier1);
+ validateRecord(record1, new String(testBuffer), createdRecord);
+
+ // Sleep past one-second timestamp granularity before re-uploading.
+ // NOTE(review): the InterruptedException is swallowed without
+ // re-interrupting the thread — acceptable in a test, but flagged.
+ try { Thread.sleep(1001); } catch (InterruptedException e) { }
+
+ final DataRecord updatedRecord = ds.addRecord(new ByteArrayInputStream(testBuffer));
+ DataIdentifier identifier2 = updatedRecord.getIdentifier();
+ assertTrue(backend.exists(identifier2));
+
+ assertTrue(identifier1.toString().equals(identifier2.toString()));
+ validateRecord(record1, new String(testBuffer), createdRecord);
+
+ ds.deleteRecord(identifier1);
+ assertFalse(backend.exists(createdRecord.getIdentifier()));
+ }
+
+ // Every identifier returned by getAllIdentifiers must be one we added.
+ // NOTE(review): the generic type parameters on Set/Iterator appear to have
+ // been stripped by patch extraction (raw types); the original presumably
+ // read Set<DataIdentifier> / Iterator<DataIdentifier> — confirm upstream.
+ @Test
+ public void testListBlobs() throws DataStoreException, IOException {
+ final Set identifiers = Sets.newHashSet();
+ final Set testStrings = Sets.newHashSet("test1", "test2", "test3");
+
+ for (String s : testStrings) {
+ identifiers.add(ds.addRecord(new ByteArrayInputStream(s.getBytes())).getIdentifier());
+ }
+
+ Iterator iter = ds.getAllIdentifiers();
+ while (iter.hasNext()) {
+ DataIdentifier identifier = iter.next();
+ assertTrue(identifiers.contains(identifier));
+ ds.deleteRecord(identifier);
+ }
+ }
+
+ ////
+ // Backend Tests
+ ////
+
+ private void validateRecordData(final SharedBackend backend,
+ final DataIdentifier identifier,
+ int expectedSize,
+ final InputStream expected) throws IOException, DataStoreException {
+ byte[] blobData = new byte[expectedSize];
+ backend.read(identifier).read(blobData);
+ byte[] expectedData = new byte[expectedSize];
+ expected.read(expectedData);
+ for (int i=0; i allIdentifiers = backend.getAllIdentifiers();
+ assertFalse(allIdentifiers.hasNext());
+ }
+
+ @Test
+ public void testBackendGetAllIdentifiers() throws DataStoreException, IOException, NoSuchAlgorithmException {
+ for (int expectedRecCount : Lists.newArrayList(1, 2, 5)) {
+ final List ids = Lists.newArrayList();
+ for (int i=0; i addedRecords = Maps.newHashMap();
+ if (0 < recCount) {
+ for (int i = 0; i < recCount; i++) {
+ String data = String.format("testData%d", i);
+ DataRecord record = ds.addRecord(new ByteArrayInputStream(data.getBytes()));
+ addedRecords.put(record.getIdentifier(), data);
+ }
+ }
+
+ Iterator iter = backend.getAllRecords();
+ List identifiers = Lists.newArrayList();
+ int actualCount = 0;
+ while (iter.hasNext()) {
+ DataRecord record = iter.next();
+ identifiers.add(record.getIdentifier());
+ assertTrue(addedRecords.containsKey(record.getIdentifier()));
+ StringWriter writer = new StringWriter();
+ IOUtils.copy(record.getStream(), writer);
+ assertTrue(writer.toString().equals(addedRecords.get(record.getIdentifier())));
+ actualCount++;
+ }
+
+ for (DataIdentifier identifier : identifiers) {
+ ds.deleteRecord(identifier);
+ }
+
+ assertEquals(recCount, actualCount);
+ }
+ }
+
+ // AddMetadataRecord (Backend)
+
+ @Test
+ public void testBackendAddMetadataRecordsFromInputStream() throws DataStoreException, IOException, NoSuchAlgorithmException {
+ for (boolean fromInputStream : Lists.newArrayList(false, true)) {
+ String prefix = String.format("%s.META.", getClass().getSimpleName());
+ for (int count : Lists.newArrayList(1, 3)) {
+ Map records = Maps.newHashMap();
+ for (int i = 0; i < count; i++) {
+ String recordName = String.format("%sname.%d", prefix, i);
+ String data = String.format("testData%d", i);
+ records.put(recordName, data);
+
+ if (fromInputStream) {
+ backend.addMetadataRecord(new ByteArrayInputStream(data.getBytes()), recordName);
+ }
+ else {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(new ByteArrayInputStream(data.getBytes()), testFile);
+ backend.addMetadataRecord(testFile, recordName);
+ }
+ }
+
+ assertEquals(count, backend.getAllMetadataRecords(prefix).size());
+
+ for (Map.Entry entry : records.entrySet()) {
+ DataRecord record = backend.getMetadataRecord(entry.getKey());
+ StringWriter writer = new StringWriter();
+ IOUtils.copy(record.getStream(), writer);
+ backend.deleteMetadataRecord(entry.getKey());
+ assertTrue(writer.toString().equals(entry.getValue()));
+ }
+
+ assertEquals(0, backend.getAllMetadataRecords(prefix).size());
+ }
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordFileNotFoundThrowsDataStoreException() throws IOException {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(randomStream(0, 10), testFile);
+ testFile.delete();
+ try {
+ backend.addMetadataRecord(testFile, "name");
+ fail();
+ }
+ catch (DataStoreException e) {
+ assertTrue(e.getCause() instanceof FileNotFoundException);
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullInputStreamThrowsNullPointerException() throws DataStoreException {
+ try {
+ backend.addMetadataRecord((InputStream)null, "name");
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("input".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullFileThrowsNullPointerException() throws DataStoreException {
+ try {
+ backend.addMetadataRecord((File)null, "name");
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("input".equals(e.getMessage()));
+ }
+ }
+
+ @Test
+ public void testBackendAddMetadataRecordNullEmptyNameThrowsIllegalArgumentException() throws DataStoreException, IOException {
+ final String data = "testData";
+ for (boolean fromInputStream : Lists.newArrayList(false, true)) {
+ for (String name : Lists.newArrayList(null, "")) {
+ try {
+ if (fromInputStream) {
+ backend.addMetadataRecord(new ByteArrayInputStream(data.getBytes()), name);
+ } else {
+ File testFile = folder.newFile();
+ copyInputStreamToFile(new ByteArrayInputStream(data.getBytes()), testFile);
+ backend.addMetadataRecord(testFile, name);
+ }
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertTrue("name".equals(e.getMessage()));
+ }
+ }
+ }
+ }
+
+ // GetMetadataRecord (Backend)
+
+ @Test
+ public void testBackendGetMetadataRecordInvalidName() throws DataStoreException {
+ backend.addMetadataRecord(randomStream(0, 10), "testRecord");
+ assertNull(backend.getMetadataRecord("invalid"));
+ for (String name : Lists.newArrayList("", null)) {
+ try {
+ backend.getMetadataRecord(name);
+ fail("Expect to throw");
+ } catch(Exception e) {}
+ }
+
+ backend.deleteMetadataRecord("testRecord");
+ }
+
+ // GetAllMetadataRecords (Backend)
+
+ @Test
+ public void testBackendGetAllMetadataRecordsPrefixMatchesAll() throws DataStoreException {
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+
+ String prefixAll = "prefix1";
+ String prefixSome = "prefix1.prefix2";
+ String prefixOne = "prefix1.prefix3";
+ String prefixNone = "prefix4";
+
+ backend.addMetadataRecord(randomStream(1, 10), String.format("%s.testRecord1", prefixAll));
+ backend.addMetadataRecord(randomStream(2, 10), String.format("%s.testRecord2", prefixSome));
+ backend.addMetadataRecord(randomStream(3, 10), String.format("%s.testRecord3", prefixSome));
+ backend.addMetadataRecord(randomStream(4, 10), String.format("%s.testRecord4", prefixOne));
+ backend.addMetadataRecord(randomStream(5, 10), "prefix5.testRecord5");
+
+ assertEquals(5, backend.getAllMetadataRecords("").size());
+ assertEquals(4, backend.getAllMetadataRecords(prefixAll).size());
+ assertEquals(2, backend.getAllMetadataRecords(prefixSome).size());
+ assertEquals(1, backend.getAllMetadataRecords(prefixOne).size());
+ assertEquals(0, backend.getAllMetadataRecords(prefixNone).size());
+
+ backend.deleteAllMetadataRecords("");
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+ }
+
+ @Test
+ public void testBackendGetAllMetadataRecordsNullPrefixThrowsNullPointerException() {
+ try {
+ backend.getAllMetadataRecords(null);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("prefix".equals(e.getMessage()));
+ }
+ }
+
+ // DeleteMetadataRecord (Backend)
+
+ @Test
+ public void testBackendDeleteMetadataRecord() throws DataStoreException {
+ backend.addMetadataRecord(randomStream(0, 10), "name");
+ for (String name : Lists.newArrayList("invalid", "", null)) {
+ if (Strings.isNullOrEmpty(name)) {
+ try {
+ backend.deleteMetadataRecord(name);
+ }
+ catch (IllegalArgumentException e) { }
+ }
+ else {
+ assertFalse(backend.deleteMetadataRecord(name));
+ }
+ }
+ assertTrue(backend.deleteMetadataRecord("name"));
+ }
+
+ // DeleteAllMetadataRecords (Backend)
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsPrefixMatchesAll() throws DataStoreException {
+ String prefixAll = "prefix1";
+ String prefixSome = "prefix1.prefix2";
+ String prefixOne = "prefix1.prefix3";
+ String prefixNone = "prefix4";
+
+ Map prefixCounts = Maps.newHashMap();
+ prefixCounts.put(prefixAll, 4);
+ prefixCounts.put(prefixSome, 2);
+ prefixCounts.put(prefixOne, 1);
+ prefixCounts.put(prefixNone, 0);
+
+ for (Map.Entry entry : prefixCounts.entrySet()) {
+ backend.addMetadataRecord(randomStream(1, 10), String.format("%s.testRecord1", prefixAll));
+ backend.addMetadataRecord(randomStream(2, 10), String.format("%s.testRecord2", prefixSome));
+ backend.addMetadataRecord(randomStream(3, 10), String.format("%s.testRecord3", prefixSome));
+ backend.addMetadataRecord(randomStream(4, 10), String.format("%s.testRecord4", prefixOne));
+
+ int preCount = backend.getAllMetadataRecords("").size();
+
+ backend.deleteAllMetadataRecords(entry.getKey());
+
+ int deletedCount = preCount - backend.getAllMetadataRecords("").size();
+ assertEquals(entry.getValue().intValue(), deletedCount);
+
+ backend.deleteAllMetadataRecords("");
+ }
+ }
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsNoRecordsNoChange() {
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+
+ backend.deleteAllMetadataRecords("");
+
+ assertEquals(0, backend.getAllMetadataRecords("").size());
+ }
+
+ @Test
+ public void testBackendDeleteAllMetadataRecordsNullPrefixThrowsNullPointerException() {
+ try {
+ backend.deleteAllMetadataRecords(null);
+ fail();
+ }
+ catch (NullPointerException e) {
+ assertTrue("prefix".equals(e.getMessage()));
+ }
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
new file mode 100644
index 0000000000..e8ad0ef59c
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/AzureDataStoreUtils.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.oak.commons.PropertiesUtil;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Extension to {@link DataStoreUtils} to enable Azure extensions for cleaning and initialization.
+ */
+public class AzureDataStoreUtils extends DataStoreUtils {
+ private static final Logger log = LoggerFactory.getLogger(AzureDataStoreUtils.class);
+
+ private static final String DEFAULT_CONFIG_PATH = "./src/test/resources/azure.properties";
+
+
+ /**
+ * Check for presence of mandatory properties.
+ *
+ * @return true if mandatory props configured.
+ */
+ public static boolean isAzureConfigured() {
+ Properties props = getAzureConfig();
+ if (!props.containsKey(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY) || !props.containsKey(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME)
+ || !(props.containsKey(AzureConstants.AZURE_BLOB_CONTAINER_NAME))) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Read any config property configured.
+ * Also, read any props available as system properties.
+ * System properties take precedence.
+ *
+ * @return Properties instance
+ */
+ public static Properties getAzureConfig() {
+ String config = System.getProperty("azure.config");
+ if (Strings.isNullOrEmpty(config)) {
+ config = DEFAULT_CONFIG_PATH;
+ }
+ Properties props = new Properties();
+ if (new File(config).exists()) {
+ InputStream is = null;
+ try {
+ is = new FileInputStream(config);
+ props.load(is);
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ IOUtils.closeQuietly(is);
+ }
+ props.putAll(getConfig());
+ Map filtered = Maps.filterEntries(Maps.fromProperties(props), new Predicate>() {
+ @Override public boolean apply(Map.Entry extends Object, ? extends Object> input) {
+ return !Strings.isNullOrEmpty((String) input.getValue());
+ }
+ });
+ props = new Properties();
+ props.putAll(filtered);
+ }
+ return props;
+ }
+
+ public static DataStore getAzureDataStore(Properties props, String homeDir) throws Exception {
+ AzureDataStore ds = new AzureDataStore();
+ PropertiesUtil.populate(ds, Maps.fromProperties(props), false);
+ ds.setProperties(props);
+ ds.init(homeDir);
+
+ return ds;
+ }
+
+ public static void deleteContainer(String containerName) throws Exception {
+ if (Strings.isNullOrEmpty(containerName)) {
+ log.warn("Cannot delete container with null or empty name. containerName={}", containerName);
+ return;
+ }
+ log.info("Starting to delete container. containerName={}", containerName);
+ Properties props = getAzureConfig();
+ CloudBlobContainer container = Utils.getBlobContainer(Utils.getConnectionStringFromProperties(props), containerName);
+ boolean result = container.deleteIfExists();
+ log.info("Container deleted. containerName={} existed={}", containerName, result);
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
new file mode 100644
index 0000000000..5433a9d2f8
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDS.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.apache.jackrabbit.core.data.DataStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.AbstractDataStoreTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Properties;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * Test {@link AzureDataStore} with the local cache on.
+ * It requires an azure config file passed via the {@code azure.config} system property,
+ * or individual system properties prefixed with 'ds.'.
+ * See details at {@link AzureDataStoreUtils}.
+ * For example: -Dazure.config=/opt/cq/azure.properties. A sample azure properties file is
+ * located at src/test/resources/azure.properties
+ */
+public class TestAzureDS extends AbstractDataStoreTest {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDS.class);
+ protected Properties props;
+ protected String container;
+
+ @BeforeClass
+ public static void assumptions() {
+ assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+ }
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ props = AzureDataStoreUtils.getAzureConfig();
+ container = String.valueOf(randomGen.nextInt(9999)) + "-" + String.valueOf(randomGen.nextInt(9999))
+ + "-test";
+ props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, container);
+ props.setProperty("secret", "123456");
+ super.setUp();
+ }
+
+ @Override
+ @After
+ public void tearDown() {
+ try {
+ super.tearDown();
+ AzureDataStoreUtils.deleteContainer(container);
+ } catch (Exception ignore) {
+
+ }
+ }
+
+ @Override
+ protected DataStore createDataStore() throws RepositoryException {
+ DataStore azureds = null;
+ try {
+ azureds = AzureDataStoreUtils.getAzureDataStore(props, dataStoreDir);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ sleep(1000);
+ return azureds;
+ }
+
+ /**---------- Skipped -----------**/
+ @Override
+ public void testUpdateLastModifiedOnAccess() {
+ }
+
+ @Override
+ public void testDeleteAllOlderThan() {
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
new file mode 100644
index 0000000000..a222b90359
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDSWithSmallCache.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.apache.jackrabbit.core.data.CachingDataStore;
+import org.apache.jackrabbit.core.data.LocalCache;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link CachingDataStore} with AzureBlobStoreBackend and a very small {@link LocalCache}.
+ * It requires an azure config file passed via the {@code azure.config} system property,
+ * or individual system properties prefixed with 'ds.'.
+ * See details at {@link AzureDataStoreUtils}.
+ * For example: -Dazure.config=/opt/cq/azure.properties. A sample azure properties file is
+ * located at src/test/resources/azure.properties
+ */
+public class TestAzureDSWithSmallCache extends TestAzureDS {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDSWithSmallCache.class);
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ // Shrink the local cache to roughly ten data items so cache-eviction paths are exercised.
+ // NOTE(review): this runs after super.setUp(); confirm AbstractDataStoreTest creates the
+ // data store lazily (per test), otherwise the property is set too late to take effect.
+ props.setProperty("cacheSize", String.valueOf(dataLength * 10));
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
new file mode 100644
index 0000000000..5dd2aab66a
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/java/org/apache/jackrabbit/oak/blob/cloud/azure/blobstorage/TestAzureDsCacheOff.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage;
+
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test {@link org.apache.jackrabbit.core.data.CachingDataStore} with AzureBlobStoreBackend
+ * and the local cache off.
+ * It requires an azure config file passed via the {@code azure.config} system property,
+ * or individual system properties prefixed with 'ds.'.
+ * See details at {@link AzureDataStoreUtils}.
+ * For example: -Dazure.config=/opt/cq/azure.properties. A sample azure properties file is
+ * located at src/test/resources/azure.properties
+ */
+public class TestAzureDsCacheOff extends TestAzureDS {
+
+ protected static final Logger LOG = LoggerFactory.getLogger(TestAzureDsCacheOff.class);
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ // cacheSize=0 disables the local file-system cache entirely.
+ // NOTE(review): this runs after super.setUp(); confirm AbstractDataStoreTest creates the
+ // data store lazily (per test), otherwise the property is set too late to take effect.
+ props.setProperty("cacheSize", "0");
+ }
+}
diff --git a/oak-blob-cloud-azure/src/test/resources/azure.properties b/oak-blob-cloud-azure/src/test/resources/azure.properties
new file mode 100644
index 0000000000..c6205c7048
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/resources/azure.properties
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Microsoft Azure authentication credentials.
+# https://azure.microsoft.com/en-us/documentation/articles/storage-create-storage-account
+# account name
+accessKey=
+# account key
+secretKey=
+
+# Microsoft Azure blob storage container name. Container is a grouping of a set
+# of blobs. https://msdn.microsoft.com/en-us/library/dd135715.aspx
+container=
+
+# The timeout interval, in milliseconds, to use for the request (default 5 minutes)
+socketTimeout=120000
+
+# Concurrent number of simultaneous requests per operation (default 1)
+maxConnections=1
+
+# Number of retries per request (default 3)
+maxErrorRetry=3
+
+# proxy configurations (optional)
+proxyHost=
+proxyPort=
diff --git a/oak-blob-cloud-azure/src/test/resources/logback-test.xml b/oak-blob-cloud-azure/src/test/resources/logback-test.xml
new file mode 100644
index 0000000000..cacb52dd54
--- /dev/null
+++ b/oak-blob-cloud-azure/src/test/resources/logback-test.xml
@@ -0,0 +1,40 @@
+
+
+
+
+
+ %date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L) %msg%n
+
+
+
+
+ target/unit-tests.log
+
+ %date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L) %msg%n
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/oak-doc/src/site/markdown/osgi_config.md b/oak-doc/src/site/markdown/osgi_config.md
index dc265a2af0..cb0215ffad 100644
--- a/oak-doc/src/site/markdown/osgi_config.md
+++ b/oak-doc/src/site/markdown/osgi_config.md
@@ -433,6 +433,7 @@ All the above data stores enable local file system caching with the following pa
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.SharedS3DataStore`_
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.S3DataStore`_
* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.FileDataStore`_
+* _PID `org.apache.jackrabbit.oak.plugins.blob.datastore.AzureDataStore`_
cacheSize
: Default - 68719476736
diff --git a/oak-doc/src/site/markdown/plugins/blobstore.md b/oak-doc/src/site/markdown/plugins/blobstore.md
index 37effce19c..91aa402384 100644
--- a/oak-doc/src/site/markdown/plugins/blobstore.md
+++ b/oak-doc/src/site/markdown/plugins/blobstore.md
@@ -78,6 +78,7 @@ Further Oak ships with multiple BlobStore implementations
4. `S3DataStore` (with wrapper) - Stores the file in Amazon S3
5. `RDBBlobStore` - Store the file contents in chunks in a relational databases. Typically used with
`DocumentNodeStore`when using a relational DB persistence
+6. `AzureDataStore` (with wrapper) - Stores the file in Microsoft Azure Blob storage
In addition there are some more implementations which are considered **experimental**
@@ -98,6 +99,7 @@ can be used
* FileDataStore - This should be used if the blobs/binaries have to be shared between multiple
repositories. This would also be used when a JR2 repository is migrated to Oak
* S3DataStore - This should be used when binaries are stored in Amazon S3
+* AzureDataStore - This should be used when binaries are stored in Microsoft Azure Blob storage
#### DocumentNodeStore
@@ -112,7 +114,7 @@ one of the following can be used
#### Caching DataStore
-The DataStore implementations `S3DataStore` and `CachingFileDataStore` support local file system caching for the
+The DataStore implementations `S3DataStore`, `CachingFileDataStore` and `AzureDataStore` support local file system caching for the
files/blobs and extend the `AbstractSharedCachingDataStore` class which implements the caching functionality. The
`CachingFileDataStore` is useful when the DataStore is on nfs.
The cache has a size limit and is configured by the `cacheSize` parameter.
@@ -198,6 +200,7 @@ Blob Garbage Collection(GC) is applicable for the following blob stores:
* FileDataStore
* S3DataStore
* SharedS3DataStore (since Oak 1.2.0)
+ * AzureDataStore
Oak implements a Mark and Sweep based Garbage Collection logic.
@@ -224,7 +227,7 @@ The garbage collection can be triggered by calling:
#### Caching of Blob ids locally (Oak 1.6.x)
-For the `FileDataStore` and `S3DataStore` the blob ids are cached locally on the disk when they are created which
+For the `FileDataStore`, `S3DataStore` and `AzureDataStore` the blob ids are cached locally on the disk when they are created which
speeds up the 'Mark BlobStore' phase. The locally tracked ids are synchronized with the data store periodically to enable
other cluster nodes or different repositories sharing the datastore to get a consolidated list of all blob ids. The
interval of synchronization is defined by the OSGi configuration parameter `blobTrackSnapshotIntervalInSecs` for the
@@ -249,13 +252,14 @@ following should be executed.
##### Registration
-On start of a repository configured to use a shared DataStore (same path or S3 bucket), a unique repository id is
+On start of a repository configured to use a shared DataStore (same path, S3 bucket or Azure container), a unique repository id is
generated and registered in the NodeStore as well as the DataStore.
In the DataStore this repository id is registered as an empty file with the format `repository-[repository-id]`
(e.g. repository-988373a0-3efb-451e-ab4c-f7e794189273). This empty file is created under:
* FileDataStore - Under the root directory configured for the datastore.
* S3DataStore - Under `META` folder in the S3 bucket configured.
+* AzureDataStore - Under `META` folder in the Azure container configured.
On start/configuration of all the repositories sharing the data store it should be confirmed that the unique
repositoryId per repository is registered in the DataStore. Refer the section below on [Checking Shared GC status](#check-shared-datastore-gc).
@@ -387,6 +391,7 @@ the steps:
* Remove the corresponding registered repository file (`repository-[repositoryId]`) from the DataStore
* FileDataStore - Remove the file from the data store root directory.
* S3DataStore - Remove the file from the `META` folder of the S3 bucket.
+ * AzureDataStore - Remove the file from the `META` folder of the Azure container.
* Remove other files corresponding to the particular repositoryId e.g. `markedTimestamp-[repositoryId]` or
`references-[repositoryId]`.
diff --git a/oak-it/pom.xml b/oak-it/pom.xml
index 566d141205..8c93c32313 100644
--- a/oak-it/pom.xml
+++ b/oak-it/pom.xml
@@ -57,6 +57,12 @@
org.apache.jackrabbit
+ oak-blob-cloud-azure
+ ${project.version}
+ test
+
+
+ org.apache.jackrabbit
oak-core
${project.version}
test-jar
@@ -85,6 +91,13 @@
org.apache.jackrabbit
+ oak-blob-cloud-azure
+ ${project.version}
+ test-jar
+ test
+
+
+ org.apache.jackrabbit
oak-commons
${project.version}
test
diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/AzureSharedBlobStoreGCTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/AzureSharedBlobStoreGCTest.java
new file mode 100644
index 0000000000..e631496256
--- /dev/null
+++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/AzureSharedBlobStoreGCTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.junit.After;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.util.Properties;
+
+/**
+ * Shared BlobStoreGCTest for Azure.
+ */
+public class AzureSharedBlobStoreGCTest extends SharedBlobStoreGCTest {
+
+    // Name of the azure container created by getBlobStore(); deleted on teardown.
+    protected String containerName;
+
+    @BeforeClass
+    public static void assumptions() {
+        // Skip unless azure credentials/config are available.
+        assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        // try/finally so super.tearDown() always runs even if container deletion fails.
+        try {
+            AzureDataStoreUtils.deleteContainer(containerName);
+        } finally {
+            super.tearDown();
+        }
+    }
+
+    @Override
+    protected DataStoreBlobStore getBlobStore(File rootFolder) throws Exception {
+        Properties props = AzureDataStoreUtils.getAzureConfig();
+        containerName = rootFolder.getName();
+        props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, containerName);
+        // Disable the local cache so the test works against the remote store directly.
+        props.setProperty("cacheSize", "0");
+        return new DataStoreBlobStore(
+            AzureDataStoreUtils.getAzureDataStore(props, rootFolder.getAbsolutePath()));
+    }
+
+    @Override
+    protected void sleep() throws InterruptedException {
+        // Give the remote store a moment to settle between operations.
+        if (AzureDataStoreUtils.isAzureConfigured()) {
+            Thread.sleep(1000);
+        }
+    }
+}
diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoAzureDataStoreBlobGCTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoAzureDataStoreBlobGCTest.java
new file mode 100644
index 0000000000..99fc4aff32
--- /dev/null
+++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoAzureDataStoreBlobGCTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.document.blob.ds.MongoDataStoreBlobGCTest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.util.Date;
+import java.util.Properties;
+
+/**
+ * Tests DataStoreGC with Mongo and Azure
+ */
+public class MongoAzureDataStoreBlobGCTest extends MongoDataStoreBlobGCTest {
+
+    @BeforeClass
+    public static void assumptions() {
+        // Skip unless azure credentials/config are available.
+        assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+    }
+
+    // Azure container name (called "bucket" for parity with the S3 variant); deleted on teardown.
+    protected String bucket;
+
+    @Before
+    @Override
+    public void setUpConnection() throws Exception {
+        Properties props = AzureDataStoreUtils.getAzureConfig();
+        startDate = new Date();
+        mongoConnection = connectionFactory.getConnection();
+        MongoUtils.dropCollections(mongoConnection.getDB());
+        File root = folder.newFolder();
+        bucket = root.getName();
+        props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, bucket);
+        // Disable the local cache so GC operates against the remote store directly.
+        props.setProperty("cacheSize", "0");
+        blobStore = new DataStoreBlobStore(
+            AzureDataStoreUtils.getAzureDataStore(props, root.getAbsolutePath()));
+        mk = new DocumentMK.Builder().clock(getTestClock()).setMongoDB(mongoConnection.getDB())
+            .setBlobStore(blobStore).open();
+    }
+
+    @After
+    @Override
+    public void tearDownConnection() throws Exception {
+        // try/finally so super.tearDownConnection() always runs even if deletion fails.
+        try {
+            AzureDataStoreUtils.deleteContainer(bucket);
+        } finally {
+            super.tearDownConnection();
+        }
+    }
+}
diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedAzureDataStoreUtilsTest.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedAzureDataStoreUtilsTest.java
new file mode 100644
index 0000000000..5b8bc35c57
--- /dev/null
+++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedAzureDataStoreUtilsTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.blob.SharedDataStoreUtilsTest;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Properties;
+
+/**
+ * SharedDataStoreUtilsTest for Azure.
+ */
+public class SharedAzureDataStoreUtilsTest extends SharedDataStoreUtilsTest {
+    private static final Logger log = LoggerFactory.getLogger(SharedAzureDataStoreUtilsTest.class);
+
+    // Container created for the test run; removed in close().
+    protected String containerName;
+
+    @BeforeClass
+    public static void assumptions() {
+        // Only run when azure configuration is present.
+        assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+    }
+
+    @Override
+    protected DataStoreBlobStore getBlobStore(File rootFolder) throws Exception {
+        containerName = rootFolder.getName();
+        Properties props = AzureDataStoreUtils.getAzureConfig();
+        props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, containerName);
+        props.setProperty("cacheSize", "0");
+        return new DataStoreBlobStore(
+            AzureDataStoreUtils.getAzureDataStore(props, rootFolder.getAbsolutePath()));
+    }
+
+    @After
+    public void close() throws IOException {
+        // Best-effort remote cleanup; log and continue on failure.
+        try {
+            AzureDataStoreUtils.deleteContainer(containerName);
+        } catch (Exception e) {
+            log.error("Error closing data store", e);
+        }
+    }
+}
diff --git a/oak-it/src/test/java/org/apache/jackrabbit/oak/segment/SegmentAzureDataStoreBlobGCIT.java b/oak-it/src/test/java/org/apache/jackrabbit/oak/segment/SegmentAzureDataStoreBlobGCIT.java
new file mode 100644
index 0000000000..c663b28c89
--- /dev/null
+++ b/oak-it/src/test/java/org/apache/jackrabbit/oak/segment/SegmentAzureDataStoreBlobGCIT.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.segment;
+
+import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_TAR;
+import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures;
+import static org.junit.Assume.assumeTrue;
+
+import com.google.common.base.Strings;
+
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.junit.After;
+import org.junit.BeforeClass;
+
+import java.io.File;
+import java.util.Properties;
+
+/**
+ * Tests for SegmentNodeStore on AzureDataStore GC
+ */
+public class SegmentAzureDataStoreBlobGCIT extends SegmentDataStoreBlobGCIT {
+    // Container created per test; cleaned up in close().
+    protected String containerName;
+
+    @BeforeClass
+    public static void assumptions() {
+        // Requires the SEGMENT_TAR fixture and azure configuration.
+        assumeTrue(getFixtures().contains(SEGMENT_TAR));
+        assumeTrue(AzureDataStoreUtils.isAzureConfigured());
+    }
+
+    @Override
+    protected DataStoreBlobStore getBlobStore(File rootFolder) throws Exception {
+        containerName = rootFolder.getName();
+        Properties props = AzureDataStoreUtils.getAzureConfig();
+        props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, containerName);
+        props.setProperty("cacheSize", "0");
+        return new DataStoreBlobStore(AzureDataStoreUtils.getAzureDataStore(props, rootFolder.getAbsolutePath()));
+    }
+
+    @After
+    public void close() throws Exception {
+        // containerName stays null/empty if assumptions skipped the test before getBlobStore ran.
+        if (!Strings.isNullOrEmpty(containerName)) {
+            AzureDataStoreUtils.deleteContainer(containerName);
+        }
+    }
+}
+
diff --git a/oak-parent/pom.xml b/oak-parent/pom.xml
index fbbf9b6a3b..1acce8d7c8 100644
--- a/oak-parent/pom.xml
+++ b/oak-parent/pom.xml
@@ -177,6 +177,7 @@
!org.apache.jackrabbit.oak.spi.blob.split
!org.apache.jackrabbit.oak.spi.blob.stats
!org.apache.jackrabbit.oak.blob.cloud.aws.s3
+ !org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage
!org.apache.jackrabbit.oak
!org.apache.jackrabbit.oak.json
!org.apache.jackrabbit.oak.management
diff --git a/oak-run/pom.xml b/oak-run/pom.xml
index 8218bc08a8..70b943485f 100644
--- a/oak-run/pom.xml
+++ b/oak-run/pom.xml
@@ -383,6 +383,13 @@
+ org.apache.jackrabbit
+ oak-blob-cloud-azure
+ ${project.version}
+ true
+
+
+
org.apache.sling
org.apache.sling.testing.osgi-mock
compile
diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java
index 6fb89e7042..4c8f8741b2 100644
--- a/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java
+++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/BlobStoreFixture.java
@@ -45,7 +45,7 @@ import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.jackrabbit.oak.fixture.DataStoreUtils.cleanup;
-import static org.apache.jackrabbit.oak.fixture.DataStoreUtils.configureIfS3DataStore;
+import static org.apache.jackrabbit.oak.fixture.DataStoreUtils.configureIfCloudDataStore;
public abstract class BlobStoreFixture implements Closeable{
private final String name;
@@ -85,7 +85,7 @@ public abstract class BlobStoreFixture implements Closeable{
String className = System.getProperty("dataStore");
if (className != null) {
- return getDataStore(basedir, fdsCacheInMB);
+ return getDataStore(basedir, fdsCacheInMB, statisticsProvider);
}
String blobStore = System.getProperty("blobStoreType");
@@ -178,7 +178,8 @@ public abstract class BlobStoreFixture implements Closeable{
};
}
- public static BlobStoreFixture getDataStore(final File basedir, final int fdsCacheInMB) {
+ public static BlobStoreFixture getDataStore(final File basedir, final int fdsCacheInMB,
+ final StatisticsProvider statisticsProvider) {
return new BlobStoreFixture("DS") {
private DataStore dataStore;
private BlobStore blobStore;
@@ -193,7 +194,8 @@ public abstract class BlobStoreFixture implements Closeable{
dataStore = Class.forName(className).asSubclass(DataStore.class).newInstance();
config = getConfig();
configure(dataStore, config);
- dataStore = configureIfS3DataStore(className, dataStore, config, unique.toLowerCase());
+
+ dataStore = configureIfCloudDataStore(className, dataStore, config, unique.toLowerCase(), statisticsProvider);
storeDir = new File(basedir, unique);
dataStore.init(storeDir.getAbsolutePath());
blobStore = new DataStoreBlobStore(dataStore, true, fdsCacheInMB);
@@ -209,7 +211,7 @@ public abstract class BlobStoreFixture implements Closeable{
if (blobStore instanceof DataStoreBlobStore) {
try {
((DataStoreBlobStore) blobStore).close();
- cleanup(storeDir, config);
+ cleanup(storeDir, config, unique.toLowerCase());
} catch (Exception e) {
throw new RuntimeException(e);
}
diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
index 92ed037499..f6d4f38248 100644
--- a/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
+++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/DataStoreUtils.java
@@ -31,61 +31,84 @@ import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.google.common.base.Strings;
+import com.microsoft.azure.storage.blob.CloudBlobContainer;
+
import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureConstants;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore;
import org.apache.jackrabbit.oak.blob.cloud.s3.S3Constants;
import org.apache.jackrabbit.oak.blob.cloud.s3.S3DataStore;
import org.apache.jackrabbit.oak.blob.cloud.s3.Utils;
+import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Extension to {@link DataStoreUtils} to enable S3 extensions for cleaning and initialization.
+ * Extension to {@link DataStoreUtils} to enable S3 / AzureBlob extensions for cleaning and initialization.
*/
public class DataStoreUtils {
private static final Logger log = LoggerFactory.getLogger(DataStoreUtils.class);
private static Class JR2_S3 = SharedS3DataStore.class;
private static Class S3 = S3DataStore.class;
+ private static Class AZURE = AzureDataStore.class;
public static boolean isS3DataStore(String dsName) {
return (dsName != null) && (dsName.equals(S3.getName()) || dsName.equals(JR2_S3.getName()));
}
- public static DataStore configureIfS3DataStore(String className, DataStore ds,
- Map config, String bucket) throws Exception {
+ public static boolean isAzureDataStore(String dsName) {
+ return (dsName != null) &&
+ (dsName.equals(AZURE.getName()));
+ }
+
+ public static DataStore configureIfCloudDataStore(String className, DataStore ds,
+ Map config, String bucket,
+ StatisticsProvider statisticsProvider) throws Exception {
// Add bucket info
Properties props = new Properties();
props.putAll(config);
- props.setProperty(S3Constants.S3_BUCKET, bucket);
-
- // Set the props object
- if (S3.getName().equals(className)) {
- ((S3DataStore) ds).setProperties(props);
- } else
- if (JR2_S3.getName().equals(className)) {
- ((org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore) ds).setProperties(props);
+
+ log.info("Using bucket [ {} ]", bucket);
+
+ if (isS3DataStore(className)) {
+ props.setProperty(S3Constants.S3_BUCKET, bucket);
+
+ // Set the props object
+ if (S3.getName().equals(className)) {
+ ((S3DataStore) ds).setProperties(props);
+ ((S3DataStore) ds).setStatisticsProvider(statisticsProvider);
+ } else if (JR2_S3.getName().equals(className)) {
+ ((org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore) ds).setProperties(props);
+ }
+ } else if (isAzureDataStore(className)) {
+ props.setProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME, bucket);
+ ((AzureDataStore) ds).setProperties(props);
+ ((AzureDataStore) ds).setStatisticsProvider(statisticsProvider);
}
+
return ds;
}
/**
- * Clean directory and if S3 bucket configured delete that.
+ * Clean directory and if S3 bucket/Azure container is configured delete that.
*
* @param storeDir the local directory
* @param config the datastore config
+ * @param bucket the S3 bucket name / Azure container name
* @throws Exception
*/
- public static void cleanup(File storeDir, Map config) throws Exception {
+ public static void cleanup(File storeDir, Map config, String bucket) throws Exception {
FileUtils.deleteQuietly(storeDir);
- String bucket = null;
if (config.containsKey(S3Constants.S3_BUCKET)) {
- bucket = (String) config.get(S3Constants.S3_BUCKET);
if (!Strings.isNullOrEmpty(bucket)) {
deleteBucket(bucket, config, new Date());
}
+ } else if (config.containsKey(AzureConstants.AZURE_BLOB_CONTAINER_NAME)) {
+ deleteAzureContainer(config, bucket);
}
}
@@ -124,4 +147,22 @@ public class DataStoreUtils {
tmx.shutdownNow();
s3service.shutdown();
}
+
+ public static void deleteAzureContainer(Map config, String containerName) throws Exception {
+ String accountName = (String)config.get(AzureConstants.AZURE_STORAGE_ACCOUNT_NAME);
+ String accountKey = (String)config.get(AzureConstants.AZURE_STORAGE_ACCOUNT_KEY);
+ if (Strings.isNullOrEmpty(containerName) ||
+ Strings.isNullOrEmpty(accountName) ||
+ Strings.isNullOrEmpty(accountKey)) {
+ return;
+ }
+        log.info("deleting container [{}]", containerName);
+ CloudBlobContainer container = org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils
+ .getBlobContainer(org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.Utils.getConnectionString(accountName, accountKey), containerName);
+ if (container.deleteIfExists()) {
+            log.info("container [{}] deleted", containerName);
+ } else {
+            log.info("container [{}] doesn't exist", containerName);
+ }
+ }
}
diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java
index 65a85cc68f..625df447c4 100644
--- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java
+++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java
@@ -79,7 +79,7 @@ public class DataStoreCheckCommand implements Command {
String helpStr =
"datastorecheck [--id] [--ref] [--consistency] [--store |] "
- + "[--s3ds |--fds ] [--dump ]";
+ + "[--s3ds |--fds |--azureblobds ] [--dump ]";
Closer closer = Closer.create();
try {
diff --git a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java
index 729f0a8b79..b993b41374 100644
--- a/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java
+++ b/oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java
@@ -21,6 +21,7 @@ import static java.util.Arrays.asList;
import static org.apache.jackrabbit.oak.commons.PropertiesUtil.populate;
import java.io.Closeable;
+import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Dictionary;
@@ -31,17 +32,12 @@ import java.util.Properties;
import javax.annotation.Nullable;
import javax.jcr.RepositoryException;
-import com.google.common.collect.Maps;
-import com.google.common.io.Closer;
-import com.mongodb.MongoClientURI;
-import com.mongodb.MongoURI;
-import joptsimple.ArgumentAcceptingOptionSpec;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-import joptsimple.OptionSpec;
+import org.apache.commons.io.FileUtils;
import org.apache.felix.cm.file.ConfigurationHandler;
import org.apache.jackrabbit.core.data.DataStore;
import org.apache.jackrabbit.core.data.DataStoreException;
+import org.apache.jackrabbit.oak.blob.cloud.azure.blobstorage.AzureDataStore;
+
import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
import org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore;
@@ -53,6 +49,17 @@ import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionExc
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import com.google.common.collect.Maps;
+import com.google.common.io.Closer;
+import com.google.common.io.Files;
+import com.mongodb.MongoClientURI;
+import com.mongodb.MongoURI;
+
+import joptsimple.ArgumentAcceptingOptionSpec;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
+
class Utils {
private static final long MB = 1024 * 1024;
@@ -131,10 +138,13 @@ class Utils {
parser.accepts("s3ds", "S3DataStore config").withRequiredArg().ofType(String.class);
ArgumentAcceptingOptionSpec fdsConfig =
parser.accepts("fds", "FileDataStore config").withRequiredArg().ofType(String.class);
+ ArgumentAcceptingOptionSpec azureBlobDSConfig =
+ parser.accepts("azureblobds", "AzureBlobStorageDataStore config").withRequiredArg().ofType(String.class);
+
OptionSet options = parser.parse(args);
- if (!options.has(s3dsConfig) && !options.has(fdsConfig)) {
+ if (!options.has(s3dsConfig) && !options.has(fdsConfig) && !options.has(azureBlobDSConfig)) {
return null;
}
@@ -146,6 +156,15 @@ class Utils {
s3ds.setProperties(props);
s3ds.init(null);
delegate = s3ds;
+ } else if (options.has(azureBlobDSConfig)) {
+ AzureDataStore azureds = new AzureDataStore();
+ String cfgPath = azureBlobDSConfig.value(options);
+ Properties props = loadAndTransformProps(cfgPath);
+ azureds.setProperties(props);
+ File homeDir = Files.createTempDir();
+ azureds.init(homeDir.getAbsolutePath());
+ closer.register(asCloseable(homeDir));
+ delegate = azureds;
} else {
delegate = new OakFileDataStore();
String cfgPath = fdsConfig.value(options);
@@ -203,6 +222,16 @@ class Utils {
};
}
+ static Closeable asCloseable(final File dir) {
+ return new Closeable() {
+
+ @Override
+ public void close() throws IOException {
+ FileUtils.deleteDirectory(dir);
+ }
+ };
+ }
+
private static Properties loadAndTransformProps(String cfgPath) throws IOException {
Dictionary dict = ConfigurationHandler.read(new FileInputStream(cfgPath));
@@ -214,4 +243,4 @@ class Utils {
}
return props;
}
-}
+}
diff --git a/pom.xml b/pom.xml
index e14f093ab8..103c2ecb9e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -40,6 +40,7 @@
oak-commons
oak-blob
oak-blob-cloud
+ oak-blob-cloud-azure
oak-core
oak-jcr
oak-upgrade