Index: oak-segment-azure/pom.xml
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- oak-segment-azure/pom.xml (revision c657d96a82279472a7f9431e5416d4fc770eea85)
+++ oak-segment-azure/pom.xml (date 1537250121000)
@@ -189,6 +189,11 @@
             <artifactId>logback-classic</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.jackrabbit</groupId>
+            <artifactId>oak-blob-plugins</artifactId>
+            <scope>test</scope>
+        </dependency>
Index: oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java (date 1537250422000)
+++ oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java (date 1537250422000)
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.segment.split;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.InvalidKeyException;
+import java.util.Random;
+import java.util.Set;
+
+import com.google.common.base.Strings;
+import com.microsoft.azure.storage.StorageException;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
+import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
+import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
+import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
+import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions.defaultGCOptions;
+import static org.junit.Assert.assertEquals;
+
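+/**
+ * Checks that binary references created through a split persistence setup (a shared
+ * Azure base layered with a local TAR persistence) and an external FileDataStore-backed
+ * blob store are reported by the file store's blob reference collector.
+ */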
+public class SplitPersistenceBlobTest {
+
+    @ClassRule
+    public static AzuriteDockerRule azurite = new AzuriteDockerRule();
+
+    @Rule
+    public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
+    private SegmentNodeStore base;
+
+    private SegmentNodeStore split;
+
+    private FileStore baseFileStore;
+
+    private FileStore splitFileStore;
+
+    @Before
+    public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException {
+        SegmentNodeStorePersistence sharedPersistence =
+                new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak"));
+        File dataStoreDir = new File(folder.getRoot(), "blobstore");
+
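+        // Seed the shared Azure persistence with initial content through the base store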
+        baseFileStore = FileStoreBuilder
+                .fileStoreBuilder(folder.newFolder())
+                .withCustomPersistence(sharedPersistence)
+                .build();
+        base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
+
+        NodeBuilder builder = base.getRoot().builder();
+        builder.child("foo").child("bar").setProperty("version", "v1");
+        base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        baseFileStore.flush();
+
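+        // Layer a local TAR persistence on top of the shared Azure persistence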
+        SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder());
+        SegmentNodeStorePersistence splitPersistence = new SplitPersistence(sharedPersistence, localPersistence);
+
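+        // The split store writes binaries to an external, FileDataStore-backed blob store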
+        splitFileStore = FileStoreBuilder
+                .fileStoreBuilder(folder.newFolder())
+                .withCustomPersistence(splitPersistence)
+                .withBlobStore(newBlobStore(dataStoreDir))
+                .withGCOptions(defaultGCOptions().setGcSizeDeltaEstimation(0))
+                .build();
+        split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
+    }
+
+    @After
+    public void tearDown() {
+        splitFileStore.close();
+        baseFileStore.close();
+    }
+
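+    // A binary written through the split store should surface as exactly one blob reference,
+    // matching the blob's content identity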
+    @Test
+    public void collectReferences()
+            throws IOException, CommitFailedException {
+        String blobId = createLoad().getContentIdentity();
+
+        assertReferences(1, blobId);
+    }
+
+    private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
+        byte[] data = new byte[size];
+        new Random().nextBytes(data);
+        return nodeStore.createBlob(new ByteArrayInputStream(data));
+    }
+
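+    // Builds a blob store backed by an OakFileDataStore rooted at the given directory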
+    private static BlobStore newBlobStore(File directory) {
+        OakFileDataStore delegate = new OakFileDataStore();
+        delegate.setPath(directory.getAbsolutePath());
+        delegate.init(null);
+        return new DataStoreBlobStore(delegate);
+    }
+
+    private Blob createLoad()
+            throws IOException, CommitFailedException {
+        NodeBuilder builder = split.getRoot().builder();
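+        // 18000 bytes should be above the segment inline limit, so the binary ends up in the blob store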
+        Blob blob = createBlob(split, 18000);
+        builder.setProperty("bin", blob);
+        split.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        splitFileStore.flush();
+        return blob;
+    }
+
+    private void assertReferences(int count, String blobId)
+            throws IOException {
+        Set<String> actualReferences = newHashSet();
+        splitFileStore.collectBlobReferences(actualReferences::add);
+        assertEquals("Unexpected number of blob references", count, actualReferences.size());
+        if (!Strings.isNullOrEmpty(blobId)) {
+            assertEquals("Returned blob reference should match the created blob", blobId,
+                    actualReferences.toArray(new String[0])[0]);
+        }
+    }
+}