Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java	(revision 0)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java	(working copy)
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.blob;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+import junit.framework.Assert;
+
+import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
+import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
+import org.apache.jackrabbit.oak.plugins.document.blob.ds.DataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
+import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests the unique cluster repository id generated by ClusterRepositoryInfo.
+ */
+public class ClusterRepositoryInfoTest {
+    static BlobStore blobStore;
+
+    @BeforeClass
+    public static void setup() {
+        try {
+            blobStore = DataStoreUtils.getBlobStore();
+            Assume.assumeThat(blobStore, instanceOf(SharedDataStore.class));
+        } catch (Exception e) {
+            Assume.assumeNoException(e);
+        }
+    }
+
+    @Test
+    public void differentCluster() throws Exception {
+        DocumentNodeStore ds1 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(new MemoryDocumentStore())
+                .setBlobStore(blobStore)
+                .getNodeStore();
+        String repoId1 = ClusterRepositoryInfo.createId(ds1);
+
+        DocumentNodeStore ds2 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(new MemoryDocumentStore())
+                .setBlobStore(blobStore)
+                .getNodeStore();
+        String repoId2 = ClusterRepositoryInfo.createId(ds2);
+
+        // Separate clusters must get different ids; compare values, not
+        // references (assertNotSame would pass trivially for distinct Strings)
+        Assert.assertFalse(repoId1.equals(repoId2));
+    }
+
+    @Test
+    public void sameCluster() throws Exception {
+        MemoryDocumentStore store = new MemoryDocumentStore();
+        DocumentNodeStore ds1 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(store)
+                .setClusterId(1)
+                .setBlobStore(blobStore)
+                .getNodeStore();
+        String repoId1 = ClusterRepositoryInfo.createId(ds1);
+        ds1.runBackgroundOperations();
+
+        DocumentNodeStore ds2 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(store)
+                .setClusterId(2)
+                .setBlobStore(blobStore)
+                .getNodeStore();
+        String repoId2 = ClusterRepositoryInfo.createId(ds2);
+
+        // Both node stores share the same DocumentStore, so the ids must be equal
+        Assert.assertEquals(repoId1, repoId2);
+    }
+}

Property changes on: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
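Together, the two tests above pin down the contract of ClusterRepositoryInfo.createId: the id is minted once per cluster and persisted through the DocumentStore, so node stores backed by the same DocumentStore observe one value, while independent stores each get their own. A minimal usage sketch under that reading (the builder settings are illustrative, not prescriptive):

    DocumentStore shared = new MemoryDocumentStore();
    DocumentNodeStore node1 = new DocumentMK.Builder()
            .setDocumentStore(shared).setClusterId(1).getNodeStore();
    String id1 = ClusterRepositoryInfo.createId(node1);
    node1.runBackgroundOperations();        // push the persisted id so peers can see it

    DocumentNodeStore node2 = new DocumentMK.Builder()
            .setDocumentStore(shared).setClusterId(2).getNodeStore();
    String id2 = ClusterRepositoryInfo.createId(node2);  // finds the stored id, mints nothing
    // id1.equals(id2) holds
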
Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java	(revision 0)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java	(working copy)
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.blob;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+import java.io.ByteArrayInputStream;
+import java.util.UUID;
+
+import junit.framework.Assert;
+
+import org.apache.jackrabbit.core.data.DataRecord;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.document.blob.ds.DataStoreUtils;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests SharedDataStoreUtils: addition, retrieval and deletion of root records.
+ */
+public class SharedDataStoreUtilsTest {
+    SharedDataStore dataStore;
+
+    @BeforeClass
+    public static void setUpBeforeClass() throws Exception {
+        try {
+            Assume.assumeThat(DataStoreUtils.getBlobStore(), instanceOf(SharedDataStore.class));
+        } catch (Exception e) {
+            Assume.assumeNoException(e);
+        }
+    }
+
+    @Test
+    public void test() throws Exception {
+        dataStore = DataStoreUtils.getBlobStore();
+        String repoId1 = UUID.randomUUID().toString();
+        String repoId2 = UUID.randomUUID().toString();
+
+        // Add repository records
+        DataRecord repo1 = SharedDataStoreUtils.addRootRecord(
+                dataStore,
+                SharedStoreRecordType.REPOSITORY,
+                new ByteArrayInputStream(new byte[0]),
+                repoId1);
+        DataRecord repo2 = SharedDataStoreUtils.addRootRecord(
+                dataStore,
+                SharedStoreRecordType.REPOSITORY,
+                new ByteArrayInputStream(new byte[0]),
+                repoId2);
+
+        // Add reference records
+        DataRecord rec1 = SharedDataStoreUtils.addRootRecord(
+                dataStore,
+                SharedStoreRecordType.REFERENCES,
+                new ByteArrayInputStream(new byte[0]),
+                repoId1);
+        DataRecord rec2 = SharedDataStoreUtils.addRootRecord(
+                dataStore,
+                SharedStoreRecordType.REFERENCES,
+                new ByteArrayInputStream(new byte[0]),
+                repoId2);
+
+        // The repository id can be recovered from each record's name
+        Assert.assertEquals(
+                SharedStoreRecordType.REPOSITORY.getIdFromName(repo1.getIdentifier().toString()),
+                repoId1);
+        Assert.assertEquals(
+                SharedStoreRecordType.REPOSITORY.getIdFromName(repo2.getIdentifier().toString()),
+                repoId2);
+        Assert.assertEquals(
+                SharedStoreRecordType.REFERENCES.getIdFromName(rec1.getIdentifier().toString()),
+                repoId1);
+        Assert.assertEquals(
+                SharedStoreRecordType.REFERENCES.getIdFromName(rec2.getIdentifier().toString()),
+                repoId2);
+
+        // All the references from the registered repositories are available
+        Assert.assertTrue(
+                SharedDataStoreUtils.refsNotAvailableFromRepos(
+                        SharedDataStoreUtils.getRootRecords(dataStore,
+                                SharedStoreRecordType.REPOSITORY),
+                        SharedDataStoreUtils.getRootRecords(dataStore,
+                                SharedStoreRecordType.REFERENCES)).isEmpty());
+
+        // The earliest record should be the first references record added
+        Assert.assertEquals(
+                SharedDataStoreUtils.getEarliestRecord(
+                        SharedDataStoreUtils.getRootRecords(dataStore,
+                                SharedStoreRecordType.REFERENCES)).getIdentifier().toString(),
+                SharedStoreRecordType.REFERENCES.getNameFromId(repoId1));
+
+        // Delete the references records and verify they are gone
+        SharedDataStoreUtils.deleteRootRecords(dataStore, SharedStoreRecordType.REFERENCES);
+        Assert.assertTrue(SharedDataStoreUtils.getRootRecords(dataStore,
+                SharedStoreRecordType.REFERENCES).isEmpty());
+
+        // The repository records should still be available
+        Assert.assertEquals(2,
+                SharedDataStoreUtils.getRootRecords(dataStore, SharedStoreRecordType.REPOSITORY)
+                        .size());
+    }
+}

Property changes on: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/SharedDataStoreUtilsTest.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
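The identifier assertions above imply that SharedStoreRecordType.getNameFromId and getIdFromName are inverses: a root record's name embeds both the record type and the repository id. The concrete encoding (for example a "references-<uuid>" prefix scheme) is an internal detail and only an assumption here; the round trip the test relies on is simply:

    String repoId = UUID.randomUUID().toString();
    String name = SharedStoreRecordType.REFERENCES.getNameFromId(repoId);
    // recovers the repository id from the stored record's name
    Assert.assertEquals(repoId, SharedStoreRecordType.REFERENCES.getIdFromName(name));
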
Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/blob/ds/DataStoreUtils.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/blob/ds/DataStoreUtils.java	(revision 1615951)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/blob/ds/DataStoreUtils.java	(working copy)
@@ -24,6 +24,7 @@
 import org.apache.jackrabbit.core.data.FileDataStore;
 import org.apache.jackrabbit.oak.commons.PropertiesUtil;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
 import org.apache.jackrabbit.oak.plugins.document.AbstractMongoConnectionTest;
 import org.junit.Test;
 
@@ -46,8 +47,9 @@
     private static final String DS_PROP_PREFIX = "ds.";
     private static final String BS_PROP_PREFIX = "bs.";
 
+    public static long time;
     public static DataStoreBlobStore getBlobStore() throws Exception {
-        String className = System.getProperty(DS_CLASS_NAME, FileDataStore.class.getName());
+        String className = System.getProperty(DS_CLASS_NAME, OakFileDataStore.class.getName());
         DataStore ds = Class.forName(className).asSubclass(DataStore.class).newInstance();
         PropertiesUtil.populate(ds, getConfig(), false);
         ds.init(getHomeDir());
@@ -67,7 +69,8 @@
     }
 
     private static String getHomeDir() {
-        return concat(new File(".").getAbsolutePath(), "target/blobstore/" + System.currentTimeMillis());
+        return concat(new File(".").getAbsolutePath(), "target/blobstore/" +
+                (time == 0 ? System.currentTimeMillis() : time));
     }
 
     @Test
Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java	(revision 1615951)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java	(working copy)
@@ -34,6 +34,7 @@
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
 import org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector.VersionGCStats;
+import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
 import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
@@ -138,6 +139,7 @@
         addInlined();
         gc(set);
     }
+
     @Test
     public void gcVersionDeleteWithInlined() throws Exception {
         HashSet<String> set = setUp(false);
@@ -144,13 +146,15 @@
         addInlined();
         gc(set);
     }
+
     private void gc(HashSet<String> set) throws Exception {
         DocumentNodeStore store = mk.getNodeStore();
+        String repoId = ClusterRepositoryInfo.createId(store);
         MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(
                 new DocumentBlobReferenceRetriever(store),
                 (GarbageCollectableBlobStore) store.getBlobStore(),
                 MoreExecutors.sameThreadExecutor(),
-                "./target", 5, true, 0);
+                "./target", 5, 0, repoId);
         gc.collectGarbage();
 
         Set<String> existing = iterate();
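Both call sites above switch the trailing MarkSweepGarbageCollector constructor arguments from a (boolean, long) pair to (long, String). Read against the old call ("./target", 5, true, 0), the change drops the boolean flag and appends the repository id after the max-age argument. The parameter names below are assumptions for illustration, not taken from the patch:

    // old: MarkSweepGarbageCollector(referenceRetriever, blobStore, executor,
    //          root, batchCount, runConcurrently, maxLastModifiedInterval)
    // new: MarkSweepGarbageCollector(referenceRetriever, blobStore, executor,
    //          root, batchCount, maxLastModifiedInterval, repositoryId)
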
Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java	(revision 0)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java	(working copy)
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.document;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import junit.framework.Assert;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.MoreExecutors;
+
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.plugins.blob.BlobGarbageCollector;
+import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
+import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
+import org.apache.jackrabbit.oak.plugins.blob.SharedStoreRecordType;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector.VersionGCStats;
+import org.apache.jackrabbit.oak.plugins.document.blob.ds.DataStoreUtils;
+import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore;
+import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.stats.Clock;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests garbage collection in a data store shared among heterogeneous Oak node stores.
+ */
+public class SharedBlobStoreGCTest {
+    private Cluster cluster1;
+    private Cluster cluster2;
+    private Clock clock;
+
+    @Before
+    public void setUp() throws Exception {
+        clock = new Clock.Virtual();
+        clock.waitUntil(Revision.getCurrentTimestamp());
+        DataStoreUtils.time = clock.getTime();
+
+        BlobStore blobStore1 = DataStoreUtils.getBlobStore();
+        DocumentNodeStore ds1 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(new MemoryDocumentStore())
+                .setBlobStore(blobStore1)
+                .clock(clock)
+                .getNodeStore();
+        String repoId1 = ClusterRepositoryInfo.createId(ds1);
+        // Register the unique repository id in the data store
+        SharedDataStoreUtils.addRootRecord(
+                (SharedDataStore) blobStore1,
+                SharedStoreRecordType.REPOSITORY,
+                new ByteArrayInputStream(new byte[0]),
+                repoId1);
+
+        BlobStore blobStore2 = DataStoreUtils.getBlobStore();
+        DocumentNodeStore ds2 = new DocumentMK.Builder()
+                .setAsyncDelay(0)
+                .setDocumentStore(new MemoryDocumentStore())
+                .setBlobStore(blobStore2)
+                .clock(clock)
+                .getNodeStore();
+        String repoId2 = ClusterRepositoryInfo.createId(ds2);
+        // Register the unique repository id in the data store
+        SharedDataStoreUtils.addRootRecord(
+                (SharedDataStore) blobStore2,
+                SharedStoreRecordType.REPOSITORY,
+                new ByteArrayInputStream(new byte[0]),
+                repoId2);
+
+        cluster1 = new Cluster(ds1, repoId1, 20);
+        cluster1.init();
+        cluster2 = new Cluster(ds2, repoId2, 100);
+        cluster2.init();
+    }
+
+    static InputStream randomStream(int seed, int size) {
+        Random r = new Random(seed);
+        byte[] data = new byte[size];
+        r.nextBytes(data);
+        return new ByteArrayInputStream(data);
+    }
+
+    @Test
+    public void testGC() throws Exception {
+        // Run only the mark phase on both clusters
+        cluster1.gc.collectGarbage(true);
+        cluster2.gc.collectGarbage(true);
+
+        // Execute the full gc, including the sweep, on one cluster
+        cluster1.gc.collectGarbage(false);
+
+        // The surviving blobs must be exactly the union of the blobs
+        // still referenced by the two clusters
+        Assert.assertTrue(Sets.symmetricDifference(
+                Sets.union(cluster1.getInitBlobs(), cluster2.getInitBlobs()),
+                cluster1.getExistingBlobIds()).isEmpty());
+    }
+
+    @After
+    public void tearDown() {
+        DataStoreUtils.time = 0;
+    }
+
+    class Cluster {
+        private DocumentNodeStore ds;
+        private int seed;
+        private BlobGarbageCollector gc;
+
+        private Set<String> initBlobs = new HashSet<String>();
+
+        protected Set<String> getInitBlobs() {
+            return initBlobs;
+        }
+
+        public Cluster(final DocumentNodeStore ds, final String repoId, int seed)
+                throws IOException {
+            this.ds = ds;
+            this.gc = new BlobGarbageCollector() {
+                @Override
+                public void collectGarbage() throws Exception {
+                    collectGarbage(false);
+                }
+
+                @Override
+                public void collectGarbage(boolean markOnly) throws Exception {
+                    MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(
+                            new DocumentBlobReferenceRetriever(ds),
+                            (GarbageCollectableBlobStore) ds.getBlobStore(),
+                            MoreExecutors.sameThreadExecutor(),
+                            "./target", 5, 0, repoId);
+                    gc.collectGarbage(markOnly);
+                }
+            };
+            this.seed = seed;
+        }
+
+        /**
+         * Creates the setup load with deletions.
+         *
+         * @throws Exception
+         */
+        public void init() throws Exception {
+            NodeBuilder a = ds.getRoot().builder();
+
+            int number = 10;
+            // pick the indices of the assets that will be deleted
+            List<Integer> deletes = Lists.newArrayList();
+            Random rand = new Random(47);
+            for (int i = 0; i < 5; i++) {
+                int n = rand.nextInt(number);
+                if (!deletes.contains(n)) {
+                    deletes.add(n);
+                }
+            }
+            for (int i = 0; i < number; i++) {
+                Blob b = ds.createBlob(randomStream(i + seed, 4160));
+                if (!deletes.contains(i)) {
+                    Iterator<String> idIter =
+                            ((GarbageCollectableBlobStore) ds.getBlobStore())
+                                    .resolveChunks(b.toString());
+                    while (idIter.hasNext()) {
+                        initBlobs.add(idIter.next());
+                    }
+                }
+                a.child("c" + i).setProperty("x", b);
+            }
+            ds.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+            a = ds.getRoot().builder();
+            for (int id : deletes) {
+                a.child("c" + id).remove();
+                ds.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+            }
+
+            long maxAge = 10; // minutes
+            // Move the virtual clock past the GC max age so the version GC
+            // can purge the deleted nodes
+            clock.waitUntil(clock.getTime() + TimeUnit.MINUTES.toMillis(maxAge));
+
+            VersionGarbageCollector vGC = ds.getVersionGarbageCollector();
+            VersionGCStats stats = vGC.gc(0, TimeUnit.MILLISECONDS);
+            Assert.assertEquals(deletes.size(), stats.deletedDocGCCount);
+        }
+
+        public Set<String> getExistingBlobIds() throws Exception {
+            GarbageCollectableBlobStore store = (GarbageCollectableBlobStore) ds.getBlobStore();
+            Iterator<String> cur = store.getAllChunkIds(0);
+
+            Set<String> existing = Sets.newHashSet();
+            while (cur.hasNext()) {
+                existing.add(cur.next());
+            }
+            return existing;
+        }
+    }
+}

Property changes on: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property

Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCTest.java
===================================================================
--- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCTest.java	(revision 1615951)
+++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCTest.java	(working copy)
@@ -40,6 +40,7 @@
 import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
 import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
@@ -121,12 +122,13 @@
     @Test
     public void gc() throws Exception {
         HashSet<String> set = setUp();
+        String repoId = ClusterRepositoryInfo.createId(nodeStore);
         MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(
                 new SegmentBlobReferenceRetriever(store.getTracker()),
                 (GarbageCollectableBlobStore) store.getBlobStore(),
                 MoreExecutors.sameThreadExecutor(),
-                "./target", 2048, true, 0);
+                "./target", 2048, 0, repoId);
         gc.collectGarbage();
 
         Set<String> existing = iterate();
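Taken together, the new tests exercise a mark/sweep protocol coordinated through root records in the shared data store. A sketch of that flow, using only methods that appear in this patch (the surrounding scheduling is illustrative, not part of the patch):

    // On every repository sharing the data store:
    String repoId = ClusterRepositoryInfo.createId(nodeStore);
    SharedDataStoreUtils.addRootRecord(sharedStore, SharedStoreRecordType.REPOSITORY,
            new ByteArrayInputStream(new byte[0]), repoId);   // register once
    gc.collectGarbage(true);    // mark phase: publishes this repository's references

    // On exactly one repository, once every registered repository has marked:
    gc.collectGarbage(false);   // sweep: deletes blobs no repository references

SharedBlobStoreGCTest.testGC asserts exactly this outcome: after the sweep, the surviving chunks equal the union of the references of both clusters. Presumably, SharedDataStoreUtils.refsNotAvailableFromRepos(repositories, references) returning an empty set, as checked in SharedDataStoreUtilsTest, is the precondition that makes the sweep safe.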