diff --git oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java
index ab84ce1..e5f6f86 100644
--- oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java
+++ oak-commons/src/test/java/org/apache/jackrabbit/oak/commons/FixturesHelper.java
@@ -43,7 +43,7 @@ public final class FixturesHelper {
      * default fixtures when no {@code nsfixtures} is provided
      */
     public enum Fixture {
-        DOCUMENT_NS, @Deprecated SEGMENT_MK, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, MULTIPLEXED_SEGMENT, MULTIPLEXED_MEM
+        DOCUMENT_NS, DOCUMENT_RDB, MEMORY_NS, DOCUMENT_MEM, SEGMENT_TAR, MULTIPLEXED_SEGMENT, MULTIPLEXED_MEM
     }

     private static final Set<Fixture> FIXTURES;
diff --git oak-it-osgi/pom.xml oak-it-osgi/pom.xml
index 7881942..de3a16c 100644
--- oak-it-osgi/pom.xml
+++ oak-it-osgi/pom.xml
@@ -83,12 +83,6 @@
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-jcr</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
diff --git oak-it/pom.xml oak-it/pom.xml
index 357c535..20dbbe7 100644
--- oak-it/pom.xml
+++ oak-it/pom.xml
@@ -39,12 +39,6 @@
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-segment-tar</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java
index a836c4b..ea096fe 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/NodeStoreFixtures.java
@@ -30,16 +30,12 @@ import org.apache.jackrabbit.oak.fixture.MemoryFixture;
 import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
 import org.apache.jackrabbit.oak.plugins.multiplex.MultiplexingMemoryFixture;
 import org.apache.jackrabbit.oak.plugins.multiplex.MultiplexingSegmentFixture;
-import org.apache.jackrabbit.oak.plugins.segment.fixture.SegmentFixture;
 import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture;

 public class NodeStoreFixtures {

     public static final NodeStoreFixture MEMORY_NS = new MemoryFixture();

-    @Deprecated
-    public static final NodeStoreFixture SEGMENT_MK = new SegmentFixture();
-
     public static final NodeStoreFixture SEGMENT_TAR = new SegmentTarFixture();

     public static final NodeStoreFixture DOCUMENT_NS = new DocumentMongoFixture();
@@ -57,9 +53,6 @@ public class NodeStoreFixtures {
         if (fixtures.contains(FixturesHelper.Fixture.DOCUMENT_NS)) {
             configuredFixtures.add(DOCUMENT_NS);
         }
-        if (fixtures.contains(FixturesHelper.Fixture.SEGMENT_MK)) {
-            configuredFixtures.add(SEGMENT_MK);
-        }
         if (fixtures.contains(FixturesHelper.Fixture.MEMORY_NS)) {
             configuredFixtures.add(MEMORY_NS);
         }
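With SEGMENT_MK removed from the Fixture enum, tests that should only run against the TarMK can no longer key off that constant. The pattern this patch uses throughout is a JUnit assumption against FixturesHelper, driven by the nsfixtures system property. A minimal sketch of that guard (the test class name is hypothetical):

import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_TAR;
import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures;
import static org.junit.Assume.assumeTrue;

import org.junit.BeforeClass;

public class ExampleSegmentTarOnlyIT {

    @BeforeClass
    public static void assumptions() {
        // Skips the whole class unless the build was started with
        // -Dnsfixtures=SEGMENT_TAR (or a list containing it); this mirrors
        // the guard SegmentS3DataStoreBlobGCIT keeps further down.
        assumeTrue(getFixtures().contains(SEGMENT_TAR));
    }
}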
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingNodeStoreTest.java oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingNodeStoreTest.java
index de2b1a2..4bbf11c 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingNodeStoreTest.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingNodeStoreTest.java
@@ -35,7 +35,6 @@ import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
 import java.io.File;
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -46,7 +45,6 @@ import java.util.concurrent.TimeUnit;
 import javax.annotation.Nullable;
 import javax.sql.DataSource;

-import com.google.common.base.Predicates;
 import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.oak.api.Blob;
 import org.apache.jackrabbit.oak.api.CommitFailedException;
@@ -57,9 +55,11 @@ import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory;
 import org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions;
 import org.apache.jackrabbit.oak.plugins.document.util.CountingDiff;
 import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.Segment;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.Segment;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.blob.FileBlobStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
@@ -77,6 +77,7 @@ import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;

 import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;

@@ -844,8 +845,8 @@ public class MultiplexingNodeStoreTest {

         BlobStore blobStore = new FileBlobStore(blobStorePath);

-        store = FileStore.builder(storePath).withBlobStore(blobStore).build();
-        instance = SegmentNodeStore.builder(store).build();
+        store = FileStoreBuilder.fileStoreBuilder(storePath).withBlobStore(blobStore).build();
+        instance = SegmentNodeStoreBuilders.builder(store).build();

         return instance;
     }
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingSegmentFixture.java oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingSegmentFixture.java
index 10ba8b7..2c84ae9 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingSegmentFixture.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/multiplex/MultiplexingSegmentFixture.java
@@ -21,8 +21,8 @@ package org.apache.jackrabbit.oak.plugins.multiplex;
 import java.io.IOException;

 import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
 import org.apache.jackrabbit.oak.spi.mount.MountInfoProvider;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
@@ -37,8 +37,8 @@ public class MultiplexingSegmentFixture extends NodeStoreFixture {
                     .readOnlyMount("temp", MOUNT_PATH)
                     .build();

-            NodeStore globalStore = SegmentNodeStore.builder(new MemoryStore()).build();
-            NodeStore tempMount = SegmentNodeStore.builder(new MemoryStore()).build();
+            NodeStore globalStore = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();
+            NodeStore tempMount = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();

             return new MultiplexingNodeStore.Builder(mip, globalStore).addMount("temp", tempMount).build();
         } catch (IOException e) {
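The builder hunks above capture the core API migration that repeats through the rest of this patch: oak-segment's nested FileStore.Builder and SegmentNodeStore.builder() are replaced by oak-segment-tar's standalone FileStoreBuilder and the SegmentNodeStoreBuilders factory. A side-by-side sketch under those assumptions (the helper class and directory handling are illustrative):

import java.io.File;

import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

public class SegmentTarBootstrap {

    public static SegmentNodeStore open(File directory) throws Exception {
        // Old (oak-segment):
        //   FileStore store = FileStore.builder(directory).build();
        //   SegmentNodeStore ns = SegmentNodeStore.builder(store).build();

        // New (oak-segment-tar):
        FileStore store = FileStoreBuilder.fileStoreBuilder(directory)
                .withMaxFileSize(256)      // same size knob as before, in MB
                .withMemoryMapping(false)
                .build();
        return SegmentNodeStoreBuilders.builder(store).build();
    }
}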
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCachingDataStoreStatsTest.java oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCachingDataStoreStatsTest.java
index 0ec9c88..4c0da15 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCachingDataStoreStatsTest.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCachingDataStoreStatsTest.java
@@ -18,12 +18,20 @@
  */
 package org.apache.jackrabbit.oak.plugins.segment;

+import static com.google.common.collect.Maps.newHashMap;
+import static org.apache.sling.testing.mock.osgi.MockOsgi.deactivate;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+
 import java.io.File;
 import java.util.Map;

 import org.apache.jackrabbit.oak.api.jmx.ConsolidatedDataStoreCacheStatsMBean;
 import org.apache.jackrabbit.oak.plugins.blob.AbstractSharedCachingDataStore;
 import org.apache.jackrabbit.oak.plugins.blob.ConsolidatedDataStoreCacheStats;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreService;
+import org.apache.jackrabbit.oak.segment.SegmentStoreProvider;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.apache.jackrabbit.oak.stats.StatisticsProvider;
@@ -36,13 +44,8 @@ import org.junit.rules.ExpectedException;
 import org.junit.rules.TemporaryFolder;
 import org.osgi.framework.ServiceRegistration;

-import static com.google.common.collect.Maps.newHashMap;
-import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.CUSTOM_BLOB_STORE;
-import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.DIRECTORY;
-import static org.apache.sling.testing.mock.osgi.MockOsgi.deactivate;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.mockito.Mockito.mock;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.CUSTOM_BLOB_STORE;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.REPOSITORY_HOME_DIRECTORY;

 /**
  * Tests the registration of the {@link ConsolidatedDataStoreCacheStatsMBean}.
@@ -107,7 +110,7 @@ public class SegmentCachingDataStoreStatsTest {
         Map<String, Object> properties = newHashMap();
         properties.put(CUSTOM_BLOB_STORE, customBlobStore);
-        properties.put(DIRECTORY, folder.getRoot().getAbsolutePath());
+        properties.put(REPOSITORY_HOME_DIRECTORY, folder.getRoot().getAbsolutePath());

         segmentNodeStoreService =
             context.registerInjectActivateService(new SegmentNodeStoreService(), properties);
     }
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreBlobGCIT.java oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreBlobGCIT.java
index bb8e447..62a2238 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreBlobGCIT.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreBlobGCIT.java
@@ -19,7 +19,6 @@
 package org.apache.jackrabbit.oak.plugins.segment;

-import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK;
 import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_TAR;
 import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures;
 import static org.junit.Assume.assumeTrue;
@@ -61,7 +60,7 @@ public class SegmentS3DataStoreBlobGCIT extends SegmentDataStoreBlobGCIT {

     @BeforeClass
     public static void assumptions() {
-        assumeTrue(getFixtures().contains(SEGMENT_MK) || getFixtures().contains(SEGMENT_TAR));
+        assumeTrue(getFixtures().contains(SEGMENT_TAR));
         assumeTrue(S3DataStoreUtils.isS3Configured());
     }
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreStatsTest.java oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreStatsTest.java
index 55b0812..7b0552a 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreStatsTest.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentS3DataStoreStatsTest.java
@@ -24,6 +24,8 @@ import java.util.Map;
 import org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3DataStoreStats;
 import org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore;
 import org.apache.jackrabbit.oak.blob.cloud.s3.stats.S3DataStoreStatsMBean;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreService;
+import org.apache.jackrabbit.oak.segment.SegmentStoreProvider;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.apache.jackrabbit.oak.stats.StatisticsProvider;
@@ -37,8 +39,8 @@ import org.junit.rules.TemporaryFolder;
 import org.osgi.framework.ServiceRegistration;

 import static com.google.common.collect.Maps.newHashMap;
-import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.CUSTOM_BLOB_STORE;
-import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.DIRECTORY;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.CUSTOM_BLOB_STORE;
+import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.REPOSITORY_HOME_DIRECTORY;
 import static org.apache.sling.testing.mock.osgi.MockOsgi.deactivate;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -107,7 +109,7 @@ public class SegmentS3DataStoreStatsTest {
         Map<String, Object> properties = newHashMap();
         properties.put(CUSTOM_BLOB_STORE, customBlobStore);
-        properties.put(DIRECTORY, folder.getRoot().getAbsolutePath());
+        properties.put(REPOSITORY_HOME_DIRECTORY, folder.getRoot().getAbsolutePath());

         segmentNodeStoreService =
             context.registerInjectActivateService(new SegmentNodeStoreService(), properties);
     }
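Both stats tests swap the removed DIRECTORY constant for REPOSITORY_HOME_DIRECTORY when activating the relocated SegmentNodeStoreService. A condensed sketch of that activation, assuming the sling-mock OsgiContext rule these tests already use (helper class and method are hypothetical):

import static com.google.common.collect.Maps.newHashMap;
import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.CUSTOM_BLOB_STORE;
import static org.apache.jackrabbit.oak.segment.SegmentNodeStoreService.REPOSITORY_HOME_DIRECTORY;

import java.io.File;
import java.util.Map;

import org.apache.jackrabbit.oak.segment.SegmentNodeStoreService;
import org.apache.sling.testing.mock.osgi.junit.OsgiContext;

class ServiceActivation {

    static SegmentNodeStoreService activate(OsgiContext context, File repositoryHome) {
        Map<String, Object> properties = newHashMap();
        properties.put(CUSTOM_BLOB_STORE, true);
        // Was DIRECTORY in oak-segment; oak-segment-tar resolves the segment
        // store location from the repository home directory instead.
        properties.put(REPOSITORY_HOME_DIRECTORY, repositoryHome.getAbsolutePath());
        return context.registerInjectActivateService(new SegmentNodeStoreService(), properties);
    }
}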
diff --git oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java
index cf08128..441e6e5 100644
--- oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java
+++ oak-it/src/test/java/org/apache/jackrabbit/oak/spi/state/NodeStoreTest.java
@@ -39,11 +39,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;

 import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;

 import org.apache.jackrabbit.oak.NodeStoreFixtures;
 import org.apache.jackrabbit.oak.OakBaseTest;
@@ -65,6 +60,10 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
 public class NodeStoreTest extends OakBaseTest {

     private NodeState root;
@@ -457,7 +456,7 @@ public class NodeStoreTest extends OakBaseTest {
     public void moveToDescendant() {
         NodeBuilder test = store.getRoot().builder().getChildNode("test");
         NodeBuilder x = test.getChildNode("x");
-        if (fixture == NodeStoreFixtures.SEGMENT_TAR || fixture == NodeStoreFixtures.SEGMENT_MK || fixture == NodeStoreFixtures.MEMORY_NS
+        if (fixture == NodeStoreFixtures.SEGMENT_TAR || fixture == NodeStoreFixtures.MEMORY_NS
                 || fixture == NodeStoreFixtures.MULTIPLEXED_SEGMENT || fixture == NodeStoreFixtures.MULTIPLEXED_MEM) {
             assertTrue(x.moveTo(x, "xx"));
             assertFalse(x.exists());
diff --git oak-jcr/pom.xml oak-jcr/pom.xml
index b65e248..68319af 100644
--- oak-jcr/pom.xml
+++ oak-jcr/pom.xml
@@ -216,12 +216,6 @@
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-segment-tar</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
@@ -247,13 +241,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-segment-tar</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/LargeOperationIT.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/LargeOperationIT.java
index 980d848..0cab14b 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/LargeOperationIT.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/LargeOperationIT.java
@@ -26,7 +26,6 @@ import static javax.jcr.observation.Event.NODE_ADDED;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assume.assumeTrue;

-import java.io.File;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
@@ -49,9 +48,6 @@ import javax.jcr.SimpleCredentials;
 import javax.jcr.observation.EventIterator;
 import javax.jcr.observation.EventListener;

-import ch.qos.logback.classic.Level;
-import ch.qos.logback.classic.LoggerContext;
-import com.google.common.collect.Lists;
 import org.apache.commons.math3.distribution.BinomialDistribution;
 import org.apache.commons.math3.exception.MathIllegalArgumentException;
 import org.apache.commons.math3.exception.MathInternalError;
@@ -64,9 +60,7 @@ import org.apache.jackrabbit.oak.fixture.DocumentMongoFixture;
 import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
 import org.apache.jackrabbit.oak.jcr.session.RefreshStrategy;
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
-import org.apache.jackrabbit.oak.plugins.segment.fixture.SegmentFixture;
+import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.junit.After;
 import org.junit.Before;
@@ -76,6 +70,11 @@ import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import com.google.common.collect.Lists;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.LoggerContext;
+
 /**
  * Scalability test asserting certain operations scale not worse than {@code O(n log n)}
  * in the size of their input.
@@ -134,11 +133,8 @@ public class LargeOperationIT {
     @Parameterized.Parameters
     public static Collection<Object[]> fixtures() throws Exception {
-        File file = new File(new File("target"), "tar." + System.nanoTime());
-        SegmentStore segmentStore = FileStore.builder(file).withMaxFileSize(256).withMemoryMapping(true).build();
-
         List<Object[]> fixtures = Lists.newArrayList();
-        SegmentFixture segmentFixture = new SegmentFixture(segmentStore);
+        SegmentTarFixture segmentFixture = new SegmentTarFixture();
         if (segmentFixture.isAvailable()) {
             fixtures.add(new Object[] {segmentFixture, SEGMENT_SCALES});
         }
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OakTarMKRepositoryStub.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OakTarMKRepositoryStub.java
index c124d75..ae1e133 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OakTarMKRepositoryStub.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/OakTarMKRepositoryStub.java
@@ -18,13 +18,15 @@ package org.apache.jackrabbit.oak.jcr;

 import java.io.File;
 import java.util.Properties;
+
 import javax.jcr.Repository;
 import javax.jcr.RepositoryException;
 import javax.jcr.Session;

 import org.apache.jackrabbit.oak.Oak;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

 /**
  * A repository stub implementation for Oak on TarMK
@@ -50,8 +52,8 @@ public class OakTarMKRepositoryStub extends OakRepositoryStub {
         try {
             File directory = new File("target", "tarmk-" + System.currentTimeMillis());
-            this.store = FileStore.builder(directory).withMaxFileSize(1).withMemoryMapping(false).build();
-            Jcr jcr = new Jcr(new Oak(SegmentNodeStore.builder(store).build()));
+            this.store = FileStoreBuilder.fileStoreBuilder(directory).withMaxFileSize(1).withMemoryMapping(false).build();
+            Jcr jcr = new Jcr(new Oak(SegmentNodeStoreBuilders.builder(store).build()));
             jcr.with(getQueryEngineSettings());
             preCreateRepository(jcr);
             this.repository = jcr.createRepository();
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/ReferenceBinaryIT.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/ReferenceBinaryIT.java
index 3a93e80..6137225 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/ReferenceBinaryIT.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/ReferenceBinaryIT.java
@@ -33,8 +33,6 @@ import javax.jcr.RepositoryException;
 import javax.jcr.Session;
 import javax.jcr.SimpleCredentials;

-import com.google.common.collect.Lists;
-import com.google.common.io.BaseEncoding;
 import org.apache.jackrabbit.api.JackrabbitRepository;
 import org.apache.jackrabbit.api.ReferenceBinary;
 import org.apache.jackrabbit.commons.jackrabbit.SimpleReferenceBinary;
@@ -44,11 +42,8 @@ import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
 import org.apache.jackrabbit.oak.plugins.document.MongoUtils;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
-import org.apache.jackrabbit.oak.plugins.segment.fixture.SegmentFixture;
+import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
-import org.apache.jackrabbit.oak.spi.blob.FileBlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.junit.After;
 import org.junit.Before;
@@ -56,6 +51,9 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;

+import com.google.common.collect.Lists;
+import com.google.common.io.BaseEncoding;
+
 @RunWith(Parameterized.class)
 public class ReferenceBinaryIT {

@@ -134,29 +132,10 @@ public class ReferenceBinaryIT {
     @Parameterized.Parameters
     public static Collection<Object[]> fixtures() throws Exception {
-        File file = getTestDir("tar");
-        SegmentStore segmentStore = FileStore.builder(file)
-                .withBlobStore(createBlobStore())
-                .withMaxFileSize(256)
-                .withMemoryMapping(true)
-                .build();
-
         List<Object[]> fixtures = Lists.newArrayList();
-        SegmentFixture segmentFixture = new SegmentFixture(segmentStore);
-        if (segmentFixture.isAvailable()) {
-            fixtures.add(new Object[] {segmentFixture});
-        }
-
-        FileBlobStore fbs = new FileBlobStore(getTestDir("fbs1").getAbsolutePath());
-        fbs.setReferenceKeyPlainText("foobar");
-        SegmentStore segmentStoreWithFBS = FileStore.builder(getTestDir("tar2"))
-                .withBlobStore(fbs)
-                .withMaxFileSize(256)
-                .withMemoryMapping(true)
-                .build();
-        SegmentFixture segmentFixtureFBS = new SegmentFixture(segmentStoreWithFBS);
-        if (segmentFixtureFBS.isAvailable()) {
-            fixtures.add(new Object[] {segmentFixtureFBS});
+        SegmentTarFixture segmentTarFixture = new SegmentTarFixture();
+        if (segmentTarFixture.isAvailable()) {
+            fixtures.add(new Object[] {segmentTarFixture});
         }

         DocumentMongoFixture documentFixture = new DocumentMongoFixture(MongoUtils.URL, createBlobStore());
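ReferenceBinaryIT above (like LargeOperationIT before it) now obtains its segment fixture as a plain new SegmentTarFixture() instead of hand-assembling a FileStore; the fixture manages its own store directory and life cycle. A minimal sketch of the resulting parameterized setup (the test class is hypothetical):

import java.util.Collection;
import java.util.List;

import org.apache.jackrabbit.oak.fixture.NodeStoreFixture;
import org.apache.jackrabbit.oak.segment.fixture.SegmentTarFixture;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import com.google.common.collect.Lists;

@RunWith(Parameterized.class)
public class ExampleFixtureIT {

    private final NodeStoreFixture fixture;

    public ExampleFixtureIT(NodeStoreFixture fixture) {
        this.fixture = fixture;
    }

    @Parameterized.Parameters
    public static Collection<Object[]> fixtures() {
        List<Object[]> fixtures = Lists.newArrayList();
        SegmentTarFixture segmentTar = new SegmentTarFixture();
        if (segmentTar.isAvailable()) { // same availability guard the diff keeps
            fixtures.add(new Object[] {segmentTar});
        }
        return fixtures;
    }
}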
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/RefreshOnGCTest.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/RefreshOnGCTest.java
index 8694dc3..cf91d23 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/RefreshOnGCTest.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/RefreshOnGCTest.java
@@ -20,28 +20,21 @@ package org.apache.jackrabbit.oak.jcr;

 import static java.io.File.createTempFile;
-import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_NONE;
 import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
 import static org.junit.Assert.assertTrue;

 import java.io.File;
-import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;

-import javax.annotation.Nonnull;
 import javax.jcr.Node;
 import javax.jcr.Repository;
 import javax.jcr.Session;
 import javax.jcr.SimpleCredentials;

-import com.google.common.collect.ImmutableList;
 import org.apache.jackrabbit.api.JackrabbitRepository;
 import org.apache.jackrabbit.oak.Oak;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
 import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitor;
 import org.apache.jackrabbit.oak.spi.gc.GCMonitorTracker;
@@ -51,20 +44,13 @@ import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;

-@RunWith(Parameterized.class)
 public class RefreshOnGCTest {
-    private final Fixture fixture;
-
     private Callable<Void> compact;
     private Repository repository;
     private GCMonitorTracker gcMonitor;

     enum Fixture {
-        SEGMENT_PERSISTED_MAP(true),
-        SEGMENT_MEMORY_MAP(false),
         SEGMENT_TAR(false);

         private final boolean persistedMap;
@@ -78,43 +64,6 @@ public class RefreshOnGCTest {
         }
     }

-    @Parameterized.Parameters
-    public static List<Fixture[]> fixtures() {
-        return ImmutableList.of(
-                new Fixture[] {Fixture.SEGMENT_PERSISTED_MAP},
-                new Fixture[] {Fixture.SEGMENT_MEMORY_MAP},
-                new Fixture[] {Fixture.SEGMENT_TAR});
-    }
-
-    public RefreshOnGCTest(Fixture fixtures) {
-        this.fixture = fixtures;
-    }
-
-    private NodeStore createSegmentStore(File directory, GCMonitor gcMonitor) throws Exception {
-        CompactionStrategy strategy = new CompactionStrategy(
-                false, false, CLEAN_NONE, 0, CompactionStrategy.MEMORY_THRESHOLD_DEFAULT) {
-            @Override
-            public boolean compacted(@Nonnull Callable<Boolean> setHead) throws Exception {
-                setHead.call();
-                return true;
-            }
-        };
-        strategy.setPersistCompactionMap(fixture.usePersistedMap());
-        final FileStore fileStore = FileStore.builder(directory)
-                .withGCMonitor(gcMonitor)
-                .build()
-                .setCompactionStrategy(strategy);
-
-        compact = new Callable<Void>() {
-            @Override
-            public Void call() throws Exception {
-                fileStore.compact();
-                return null;
-            }
-        };
-        return SegmentNodeStore.builder(fileStore).build();
-    }
-
     private NodeStore createSegmentTarStore(File directory, GCMonitor gcMonitor) throws Exception {
         final org.apache.jackrabbit.oak.segment.file.FileStore fileStore =
                 fileStoreBuilder(directory).withGCMonitor(gcMonitor).build();
@@ -138,12 +87,7 @@ public class RefreshOnGCTest {
         gcMonitor = new GCMonitorTracker();
         gcMonitor.start(whiteboard);

-        Oak oak;
-        if (fixture == Fixture.SEGMENT_TAR) {
-            oak = new Oak(createSegmentTarStore(directory, gcMonitor));
-        } else {
-            oak = new Oak(createSegmentStore(directory, gcMonitor));
-        }
+        Oak oak = new Oak(createSegmentTarStore(directory, gcMonitor));
         oak.with(whiteboard);
         repository = new Jcr(oak).createRepository();
     }
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/tck/TCKBase.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/tck/TCKBase.java
index e28dadf..b6acc62 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/tck/TCKBase.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/tck/TCKBase.java
@@ -30,7 +30,6 @@ import org.apache.jackrabbit.oak.jcr.OakDocumentMemRepositoryStub;
 import org.apache.jackrabbit.oak.jcr.OakDocumentRDBRepositoryStub;
 import org.apache.jackrabbit.oak.jcr.OakMongoNSRepositoryStub;
 import org.apache.jackrabbit.oak.jcr.OakSegmentTarRepositoryStub;
-import org.apache.jackrabbit.oak.jcr.OakTarMKRepositoryStub;
 import org.apache.jackrabbit.test.RepositoryHelper;
 import org.apache.jackrabbit.test.RepositoryHelperPool;
 import org.apache.jackrabbit.test.RepositoryHelperPoolImpl;
@@ -54,9 +53,6 @@ public abstract class TCKBase extends TestSuite {

     public TCKBase(String name) {
         super(name);
-        if (FIXTURES.contains(Fixture.SEGMENT_MK)) {
-            Setup.wrap(this, OakTarMKRepositoryStub.class.getName());
-        }
         if (FIXTURES.contains(Fixture.SEGMENT_TAR)) {
             Setup.wrap(this, OakSegmentTarRepositoryStub.class.getName());
         }
diff --git oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/version/HiddenNodeTest.java oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/version/HiddenNodeTest.java
index 198306f..0e9d651 100644
--- oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/version/HiddenNodeTest.java
+++ oak-jcr/src/test/java/org/apache/jackrabbit/oak/jcr/version/HiddenNodeTest.java
@@ -16,6 +16,10 @@
  */
 package org.apache.jackrabbit.oak.jcr.version;

+import static org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest.dispose;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import javax.jcr.Node;
 import javax.jcr.Repository;
 import javax.jcr.Session;
@@ -25,10 +29,10 @@ import javax.jcr.version.VersionManager;

 import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.jcr.Jcr;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore;
 import org.apache.jackrabbit.oak.plugins.tree.impl.TreeConstants;
 import org.apache.jackrabbit.oak.plugins.version.VersionConstants;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
@@ -38,10 +42,6 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

-import static org.apache.jackrabbit.oak.jcr.AbstractRepositoryTest.dispose;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 /**
  * Checks if hidden nodes are properly handled on checkin and restore (OAK-1219, OAK-OAK-1226).
  */
@@ -54,7 +54,7 @@ public class HiddenNodeTest {

     @Before
     public void before() throws Exception {
-        store = SegmentNodeStore.builder(new MemoryStore()).build();
+        store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();
         repo = new Jcr(store).createRepository();
         session = repo.login(new SimpleCredentials("admin", "admin".toCharArray()));
         vMgr = session.getWorkspace().getVersionManager();
diff --git oak-lucene/pom.xml oak-lucene/pom.xml
index fdea0c0..becf681 100644
--- oak-lucene/pom.xml
+++ oak-lucene/pom.xml
@@ -272,12 +272,6 @@
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-segment-tar</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java
index ca831e3..f4d02d5 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneBlobCacheTest.java
@@ -32,8 +32,9 @@ import org.apache.jackrabbit.core.data.DataRecord;
 import org.apache.jackrabbit.core.data.DataStoreException;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
@@ -63,11 +64,11 @@ public class LuceneBlobCacheTest {
     public void setUp() throws Exception {
         fileDataStore = new ReadAccessCountingDataStore();
         fileDataStore.init(tempFolder.newFolder().getAbsolutePath());
-        FileStore.Builder fileStoreBuilder = FileStore.builder(tempFolder.newFolder())
+        FileStoreBuilder fileStoreBuilder = FileStoreBuilder.fileStoreBuilder(tempFolder.newFolder())
                 .withBlobStore(new DataStoreBlobStore(fileDataStore)).withMaxFileSize(256)
-                .withCacheSize(64).withMemoryMapping(false);
+                .withSegmentCacheSize(64).withMemoryMapping(false);
         store = fileStoreBuilder.build();
-        NodeStore nodeStore = SegmentNodeStore.builder(store).build();
+        NodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
         root = nodeStore.getRoot();
         builder = root.builder();
     }
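Besides relocating the builder, LuceneBlobCacheTest picks up one renamed option: oak-segment's withCacheSize(64) becomes withSegmentCacheSize(64) in oak-segment-tar, reflecting that the value sizes the segment cache specifically. A sketch with the same values the test uses (the helper class is hypothetical):

import java.io.File;

import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

class SegmentCacheConfig {

    static FileStore open(File directory) throws Exception {
        return FileStoreBuilder.fileStoreBuilder(directory)
                .withMaxFileSize(256)       // max tar file size, MB
                .withSegmentCacheSize(64)   // was withCacheSize(64) in oak-segment
                .withMemoryMapping(false)
                .build();
    }
}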
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
index c9eb9ff..1e73331 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
@@ -79,8 +79,6 @@ import org.apache.jackrabbit.oak.plugins.index.lucene.directory.LocalIndexDir;
 import org.apache.jackrabbit.oak.plugins.index.lucene.score.ScorerProvider;
 import org.apache.jackrabbit.oak.plugins.index.lucene.score.ScorerProviderFactory;
 import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore;
 import org.apache.jackrabbit.oak.query.NodeStateNodeTypeInfoProvider;
 import org.apache.jackrabbit.oak.query.QueryEngineSettings;
 import org.apache.jackrabbit.oak.query.ast.NodeTypeInfo;
@@ -90,6 +88,8 @@ import org.apache.jackrabbit.oak.query.ast.SelectorImpl;
 import org.apache.jackrabbit.oak.query.fulltext.FullTextParser;
 import org.apache.jackrabbit.oak.query.fulltext.FullTextTerm;
 import org.apache.jackrabbit.oak.query.index.FilterImpl;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EditorHook;
 import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
@@ -708,7 +708,7 @@ public class LuceneIndexTest {
         //Issue is not reproducible with MemoryNodeBuilder and
         //MemoryNodeState as they cannot determine change in childNode without
         //entering
-        NodeStore nodeStore = SegmentNodeStore.builder(new MemoryStore()).build();
+        NodeStore nodeStore = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();
         tracker = new IndexTracker();
         ((Observable)nodeStore).addObserver(new Observer() {
             @Override
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectoryTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectoryTest.java
index 1b5052b..50a71fb 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectoryTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectoryTest.java
@@ -56,9 +56,11 @@ import org.apache.jackrabbit.oak.api.Type;
 import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob;
 import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
 import org.apache.jackrabbit.oak.plugins.memory.PropertyStates;
-import org.apache.jackrabbit.oak.plugins.segment.Segment;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.Segment;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
 import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
@@ -339,11 +341,11 @@ public class OakDirectoryTest {

     @Test
     public void largeFile() throws Exception{
-        FileStore store = FileStore.builder(tempFolder.getRoot())
+        FileStore store = FileStoreBuilder.fileStoreBuilder(tempFolder.getRoot())
                 .withMemoryMapping(false)
                 .withBlobStore(new BlackHoleBlobStore())
                 .build();
-        SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build();
+        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
         IndexDefinition defn = new IndexDefinition(INITIAL_CONTENT, EmptyNodeState.EMPTY_NODE, "/foo");
         Directory directory = new OakDirectory(nodeStore.getRoot().builder(), defn, false);
@@ -393,11 +395,11 @@ public class OakDirectoryTest {
     @Test
     public void dirNameInException_Writes() throws Exception{
         FailOnDemandBlobStore blobStore = new FailOnDemandBlobStore();
-        FileStore store = FileStore.builder(tempFolder.getRoot())
+        FileStore store = FileStoreBuilder.fileStoreBuilder(tempFolder.getRoot())
                 .withMemoryMapping(false)
                 .withBlobStore(blobStore)
                 .build();
-        SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build();
+        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();

         String indexPath = "/foo/bar";
diff --git oak-parent/pom.xml oak-parent/pom.xml
index 4722164..d88c4b7 100644
--- oak-parent/pom.xml
+++ oak-parent/pom.xml
@@ -222,10 +222,6 @@
               !org.apache.jackrabbit.oak.plugins.index.lucene
               !org.apache.jackrabbit.oak.plugins.index.lucene.util
               !org.apache.jackrabbit.oak.security.authentication.ldap
-              !org.apache.jackrabbit.oak.plugins.backup
-              !org.apache.jackrabbit.oak.plugins.segment
-              !org.apache.jackrabbit.oak.plugins.segment.file
-              !org.apache.jackrabbit.oak.plugins.segment.http
               !org.apache.jackrabbit.oak.plugins.index.solr
               !org.apache.jackrabbit.oak.plugins.index.solr.configuration
               !org.apache.jackrabbit.oak.plugins.index.solr.index
diff --git oak-pojosr/pom.xml oak-pojosr/pom.xml
index 1188402..3fa47ed 100644
--- oak-pojosr/pom.xml
+++ oak-pojosr/pom.xml
@@ -89,11 +89,6 @@
     <dependency>
      <groupId>org.apache.jackrabbit</groupId>
-     <artifactId>oak-segment</artifactId>
-     <version>${project.version}</version>
-   </dependency>
-   <dependency>
-     <groupId>org.apache.jackrabbit</groupId>
      <artifactId>oak-segment-tar</artifactId>
      <version>${project.version}</version>
    </dependency>
diff --git oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JaasConfigSpiTest.groovy oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JaasConfigSpiTest.groovy
index efa4cfd..d80fb80 100644
--- oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JaasConfigSpiTest.groovy
+++ oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JaasConfigSpiTest.groovy
@@ -54,7 +54,7 @@ class JaasConfigSpiTest extends AbstractRepositoryFactoryTest{
                         (AuthenticationConfiguration.PARAM_CONFIG_SPI_NAME) : 'FelixJaasProvider'
                 ],
                 'org.apache.jackrabbit.oak.jcr.osgi.RepositoryManager' : [:],
-                'org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService' : [:]
+                'org.apache.jackrabbit.oak.segment.SegmentNodeStoreService' : [:]
         ]
     }
diff --git oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JsonConfigRepFactoryTest.groovy oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JsonConfigRepFactoryTest.groovy
index 7f6e3fc..5c4c0e0 100644
--- oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JsonConfigRepFactoryTest.groovy
+++ oak-pojosr/src/test/groovy/org/apache/jackrabbit/oak/run/osgi/JsonConfigRepFactoryTest.groovy
@@ -40,7 +40,7 @@ class JsonConfigRepFactoryTest extends AbstractRepositoryFactoryTest{
     @Test
     public void testRepositoryTar() throws Exception {
         config[REPOSITORY_CONFIG] = [
-                'org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService' : [:]
+                'org.apache.jackrabbit.oak.segment.SegmentNodeStoreService' : [:]
         ]

         repository = repositoryFactory.getRepository(config);
diff --git oak-remote/pom.xml oak-remote/pom.xml
index 572c058..39ec275 100644
--- oak-remote/pom.xml
+++ oak-remote/pom.xml
@@ -84,19 +84,6 @@
     <dependency>
       <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
-      <artifactId>oak-segment</artifactId>
-      <version>${project.version}</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.jackrabbit</groupId>
       <artifactId>oak-segment-tar</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
diff --git oak-run/pom.xml oak-run/pom.xml
index 8c49822..846100b 100644
--- oak-run/pom.xml
+++ oak-run/pom.xml
@@ -159,11 +159,6 @@
     <dependency>
      <groupId>org.apache.jackrabbit</groupId>
-     <artifactId>oak-segment</artifactId>
-     <version>${project.version}</version>
-   </dependency>
-   <dependency>
-     <groupId>org.apache.jackrabbit</groupId>
      <artifactId>oak-segment-tar</artifactId>
      <version>${project.version}</version>
    </dependency>
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/BenchmarkRunner.java oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/BenchmarkRunner.java
index 22959a0..440b4d5 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/BenchmarkRunner.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/BenchmarkRunner.java
@@ -16,6 +16,8 @@
  */
 package org.apache.jackrabbit.oak.benchmark;

+import static java.util.Arrays.asList;
+
 import java.io.File;
 import java.io.PrintStream;
 import java.util.ArrayList;
@@ -27,17 +29,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;

-import com.codahale.metrics.ConsoleReporter;
-import com.codahale.metrics.Counting;
-import com.codahale.metrics.Metric;
-import com.codahale.metrics.MetricFilter;
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.MoreExecutors;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-import joptsimple.OptionSpec;
 import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.oak.benchmark.authentication.external.ExternalLoginTest;
 import org.apache.jackrabbit.oak.benchmark.authentication.external.PrincipalNameResolutionTest;
@@ -53,7 +44,18 @@ import org.apache.jackrabbit.oak.plugins.metric.MetricStatisticsProvider;
 import org.apache.jackrabbit.oak.spi.xml.ImportBehavior;
 import org.apache.jackrabbit.oak.stats.StatisticsProvider;

-import static java.util.Arrays.asList;
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.Counting;
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricFilter;
+import com.codahale.metrics.MetricRegistry;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.MoreExecutors;
+
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;

 public class BenchmarkRunner {

@@ -195,10 +197,6 @@ public class BenchmarkRunner {
             OakRepositoryFixture.getMongoNS(uri,
                     dropDBAfterTest.value(options),
                     cacheSize * MB),
-            OakRepositoryFixture.getTar(
-                    base.value(options), 256, cacheSize, mmap.value(options)),
-            OakRepositoryFixture.getTarWithBlobStore(base.value(options), 256, cacheSize,
-                    mmap.value(options), fdsCache.value(options)),
             OakRepositoryFixture.getSegmentTar(base.value(options), 256, cacheSize,
                     mmap.value(options)),
             OakRepositoryFixture.getSegmentTarWithBlobStore(base.value(options), 256, cacheSize,
@@ -275,9 +273,6 @@ public class BenchmarkRunner {
                     wikipedia.value(options),
                     flatStructure.value(options),
                     report.value(options)),
-            new RepositoryGrowthTest(wikipedia.value(options),
-                    base.value(options),
-                    luceneIndexOnFS.value(options)),
             new CreateNodesBenchmark(),
             new ManyNodes(),
             new ObservationTest(),
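With getTar and getTarWithBlobStore dropped, benchmarks can only target the segment-tar fixtures. A sketch of how a caller now selects the fixture, using the same argument order the surviving calls show (the helper class and values are illustrative):

import java.io.File;

import org.apache.jackrabbit.oak.fixture.OakRepositoryFixture;
import org.apache.jackrabbit.oak.fixture.RepositoryFixture;

class BenchmarkFixtureSelection {

    static RepositoryFixture segmentTar(File base, int cacheSizeMB, boolean mmap) {
        // Replaces OakRepositoryFixture.getTar(base, 256, cacheSizeMB, mmap)
        return OakRepositoryFixture.getSegmentTar(base, 256, cacheSizeMB, mmap);
    }
}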
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RepositoryGrowthTest.java oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RepositoryGrowthTest.java
deleted file mode 100644
index aaa1093..0000000
--- oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RepositoryGrowthTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.jackrabbit.oak.benchmark;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Map;
-
-import javax.jcr.Repository;
-import javax.jcr.RepositoryException;
-import javax.jcr.Session;
-
-import com.google.common.collect.Maps;
-import org.apache.commons.io.FileUtils;
-import org.apache.jackrabbit.oak.Oak;
-import org.apache.jackrabbit.oak.benchmark.wikipedia.WikipediaImport;
-import org.apache.jackrabbit.oak.commons.IOUtils;
-import org.apache.jackrabbit.oak.fixture.JcrCreator;
-import org.apache.jackrabbit.oak.fixture.OakFixture;
-import org.apache.jackrabbit.oak.fixture.OakRepositoryFixture;
-import org.apache.jackrabbit.oak.fixture.RepositoryFixture;
-import org.apache.jackrabbit.oak.jcr.Jcr;
-import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditorProvider;
-import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexProvider;
-import org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneIndexHelper;
-import org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneInitializerHelper;
-import org.apache.jackrabbit.oak.spi.commit.Observer;
-import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider;
-
-import static org.apache.jackrabbit.oak.fixture.OakFixture.SegmentFixture;
-
-public class RepositoryGrowthTest extends WikipediaImport {
-    private final boolean luceneIndexOnFS;
-    private final File basedir;
-    private final Map<RepositoryFixture, File> indexDirs = Maps.newHashMap();
-
-    public RepositoryGrowthTest(File dump, File basedir, boolean luceneIndexOnFS) {
-        super(dump, true, true);
-        this.luceneIndexOnFS = luceneIndexOnFS;
-        this.basedir = basedir;
-    }
-
-    @Override
-    protected Repository[] setupCluster(final RepositoryFixture fixture) throws Exception {
-        if (fixture instanceof OakRepositoryFixture) {
-            return ((OakRepositoryFixture) fixture).setUpCluster(1, new JcrCreator() {
-                @Override
-                public Jcr customize(Oak oak) {
-                    LuceneIndexProvider provider = new LuceneIndexProvider();
-                    String path = null;
-                    if(luceneIndexOnFS){
-                        File indexDir = new File(basedir, "lucene-"+System.currentTimeMillis());
-                        path = indexDir.getAbsolutePath();
-                        indexDirs.put(fixture, indexDir);
-                    }
-                    oak.with((QueryIndexProvider) provider)
-                            .with((Observer) provider)
-                            .with(new LuceneIndexEditorProvider())
-                            .with(new LuceneInitializerHelper("luceneGlobal", LuceneIndexHelper.JR_PROPERTY_INCLUDES,
-                                    null, path, null));
-                    return new Jcr(oak);
-                }
-            });
-        }
-        return super.setupCluster(fixture);
-    }
-
-    @Override
-    protected void tearDown(RepositoryFixture fixture) throws IOException {
-        if (fixture instanceof OakRepositoryFixture) {
-            OakFixture oakFixture = ((OakRepositoryFixture) fixture).getOakFixture();
-            if(oakFixture instanceof SegmentFixture){
-                SegmentFixture sf = (SegmentFixture) oakFixture;
-                long size = sf.getStores()[0].size();
-
-                if(sf.getBlobStoreFixtures().length > 0) {
-                    size = sf.getBlobStoreFixtures()[0].size();
-                }
-
-                File indexDir = indexDirs.get(fixture);
-                if(indexDir != null){
-                    size += FileUtils.sizeOfDirectory(indexDir);
-                }
-                System.out.printf("Repository size %s %s %n", fixture,
-                        IOUtils.humanReadableByteCount(size));
-            }
-        }
-        super.tearDown(fixture);
-    }
-
-    @Override
-    protected void batchDone(Session session, long start, int count) throws RepositoryException {
-        session.save();
-        super.batchDone(session, start, count);
-    }
-}
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RevisionGCTest.java oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RevisionGCTest.java
index 6766458..c6e4a23 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RevisionGCTest.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/benchmark/RevisionGCTest.java
@@ -27,18 +27,16 @@ import javax.jcr.RepositoryException;
 import javax.jcr.Session;
 import javax.jcr.SimpleCredentials;

-import com.google.common.base.Stopwatch;
-
 import org.apache.jackrabbit.oak.Oak;
 import org.apache.jackrabbit.oak.fixture.JcrCreator;
 import org.apache.jackrabbit.oak.fixture.OakRepositoryFixture;
 import org.apache.jackrabbit.oak.fixture.RepositoryFixture;
 import org.apache.jackrabbit.oak.jcr.Jcr;
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;

+import com.google.common.base.Stopwatch;
+
 /**
  * A benchmark to run RevisionGC.
 */
@@ -119,12 +117,7 @@ public class RevisionGCTest extends Benchmark {
             return ((DocumentNodeStore) nodeStore).getVersionGarbageCollector()
                     .gc(0, TimeUnit.SECONDS).toString();
-        } else if (nodeStore instanceof SegmentNodeStore) {
-            Field f = SegmentNodeStore.class.getDeclaredField("store");
-            f.setAccessible(true);
-            ((SegmentStore) f.get(nodeStore)).gc();
-            return "";
-        }
+        }
         throw new IllegalArgumentException("Unknown node store: "
                 + nodeStore.getClass().getName());
     }
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/Checkpoints.java oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/Checkpoints.java
index a617c31..5e70a18 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/Checkpoints.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/Checkpoints.java
@@ -24,22 +24,18 @@ import java.util.Set;

 import javax.annotation.Nonnull;

-import com.google.common.io.Closer;
 import org.apache.jackrabbit.oak.api.PropertyState;
 import org.apache.jackrabbit.oak.api.Type;
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException;
 import org.apache.jackrabbit.oak.spi.state.NodeState;

+import com.google.common.io.Closer;
+
 /**
  * A helper class to manage checkpoints on TarMK and DocumentMK.
 */
 public abstract class Checkpoints {

-    public static Checkpoints onSegment(File path, Closer closer) throws IOException, InvalidFileStoreVersionException {
-        return SegmentCheckpoints.create(path, closer);
-    }
-
     public static Checkpoints onSegmentTar(File path, Closer closer) throws IOException {
         return SegmentTarCheckpoints.create(path, closer);
     }
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/SegmentCheckpoints.java oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/SegmentCheckpoints.java
deleted file mode 100644
index c130949..0000000
--- oak-run/src/main/java/org/apache/jackrabbit/oak/checkpoint/SegmentCheckpoints.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.jackrabbit.oak.checkpoint;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import com.google.common.collect.Lists;
-import com.google.common.io.Closer;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException;
-import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
-import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
-import org.apache.jackrabbit.oak.spi.state.NodeState;
-
-final class SegmentCheckpoints extends Checkpoints {
-
-    static Checkpoints create(File path, Closer closer) throws IOException, InvalidFileStoreVersionException {
-        return new SegmentCheckpoints(closer.register(FileStore.builder(path).build()));
-    }
-
-    private final FileStore store;
-
-    private SegmentCheckpoints(FileStore store) {
-        this.store = store;
-    }
-
-    @Override
-    public List<CP> list() {
-        List<CP> list = Lists.newArrayList();
-        NodeState ns = store.getHead().getChildNode("checkpoints");
-        for (ChildNodeEntry cne : ns.getChildNodeEntries()) {
-            NodeState cneNs = cne.getNodeState();
-            list.add(new CP(cne.getName(),
-                    cneNs.getLong("created"), cneNs.getLong("timestamp")));
-        }
-        return list;
-    }
-
-    @Override
-    public long removeAll() {
-        SegmentNodeState head = store.getHead();
-        NodeBuilder builder = head.builder();
-
-        NodeBuilder cps = builder.getChildNode("checkpoints");
-        long cnt = cps.getChildNodeCount(Integer.MAX_VALUE);
-        builder.setChildNode("checkpoints");
-        if (store.setHead(head, asSegmentNodeState(builder))) {
-            return cnt;
-        } else {
-            return -1;
-        }
-    }
-
-    @Override
-    public long removeUnreferenced() {
-        SegmentNodeState head = store.getHead();
-
-        Set<String> refs = getReferencedCheckpoints(head.getChildNode("root"));
-
-        NodeBuilder builder = head.builder();
-        NodeBuilder cps = builder.getChildNode("checkpoints");
-        long cnt = 0;
-        for (String c : cps.getChildNodeNames()) {
-            if (refs.contains(c)) {
-                continue;
-            }
-            cps.getChildNode(c).remove();
-            cnt++;
-        }
-
-        if (store.setHead(head, asSegmentNodeState(builder))) {
-            return cnt;
-        } else {
-            return -1;
-        }
-    }
-
-    @Override
-    public int remove(String cp) {
-        SegmentNodeState head = store.getHead();
-        NodeBuilder builder = head.builder();
-
-        NodeBuilder cpn = builder.getChildNode("checkpoints")
-                .getChildNode(cp);
-        if (cpn.exists()) {
-            cpn.remove();
-            if (store.setHead(head, asSegmentNodeState(builder))) {
-                return 1;
-            } else {
-                return -1;
-            }
-        } else {
-            return 0;
-        }
-    }
-
-    private static SegmentNodeState asSegmentNodeState(NodeBuilder builder) {
-        return (SegmentNodeState) builder.getNodeState();
-    }
-}
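With SegmentCheckpoints deleted, Checkpoints.onSegmentTar(...) is the only segment-store entry point left in the checkpoint tool. A sketch of the calling convention, assuming the Checkpoints members shown above are accessible to the caller (the helper class is hypothetical):

import java.io.File;

import org.apache.jackrabbit.oak.checkpoint.Checkpoints;

import com.google.common.io.Closer;

class CheckpointCleanup {

    static void removeUnreferenced(File segmentStorePath) throws Exception {
        Closer closer = Closer.create();
        try {
            // onSegment(path, closer) is gone; only the tar variant remains.
            Checkpoints cps = Checkpoints.onSegmentTar(segmentStorePath, closer);
            long removed = cps.removeUnreferenced();
            System.out.println("removed " + removed + " unreferenced checkpoints");
        } finally {
            closer.close();
        }
    }
}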
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/console/Console.java oak-run/src/main/java/org/apache/jackrabbit/oak/console/Console.java
index 48ea55b..9fe4e19 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/console/Console.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/console/Console.java
@@ -25,24 +25,23 @@ import java.util.List;

 import javax.sql.DataSource;

-import com.mongodb.MongoClientURI;
-import com.mongodb.MongoURI;
-import joptsimple.OptionParser;
-import joptsimple.OptionSet;
-import joptsimple.OptionSpec;
 import org.apache.jackrabbit.core.data.FileDataStore;
 import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
 import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
 import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
 import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory;
 import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
-import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
-import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
 import org.apache.jackrabbit.oak.spi.blob.BlobStore;
 import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.codehaus.groovy.tools.shell.IO;

+import com.mongodb.MongoClientURI;
+import com.mongodb.MongoURI;
+
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
+
 /**
  * A command line console.
 */
@@ -56,7 +55,6 @@ public class Console {
         OptionSpec<?> shell = parser.accepts("shell", "run the shell after executing files");
         OptionSpec<?> readWrite = parser.accepts("read-write", "connect to repository in read-write mode");
         OptionSpec<String> fdsPathSpec = parser.accepts("fds-path", "Path to FDS store").withOptionalArg().defaultsTo("");
-        OptionSpec<?> segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar");
         OptionSpec<?> help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp();

         // RDB specific options
@@ -123,19 +121,6 @@ public class Console {
             }
             DocumentNodeStore store = builder.getNodeStore();
             fixture = new MongoFixture(store);
-        } else if (options.has(segment)) {
-            FileStore.Builder fsBuilder = FileStore.builder(new File(nonOptions.get(0)))
-                    .withMaxFileSize(256).withDefaultMemoryMapping();
-            if (blobStore != null) {
-                fsBuilder.withBlobStore(blobStore);
-            }
-            FileStore store;
-            if (readOnly) {
-                store = fsBuilder.buildReadOnly();
-            } else {
-                store = fsBuilder.build();
-            }
-            fixture = new SegmentFixture(store);
         } else {
             fixture = SegmentTarFixture.create(new File(nonOptions.get(0)), readOnly, blobStore);
         }
@@ -184,25 +169,4 @@ public class Console {
             nodeStore.dispose();
         }
     }
-
-    @Deprecated
-    private static class SegmentFixture implements NodeStoreFixture {
-        private final SegmentStore segmentStore;
-        private final SegmentNodeStore nodeStore;
-
-        private SegmentFixture(SegmentStore segmentStore) {
-            this.segmentStore = segmentStore;
-            this.nodeStore = SegmentNodeStore.builder(segmentStore).build();
-        }
-
-        @Override
-        public NodeStore getStore() {
-            return nodeStore;
-        }
-
-        @Override
-        public void close() throws IOException {
-            segmentStore.close();
-        }
-    }
 }
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/console/NodeStoreOpener.java oak-run/src/main/java/org/apache/jackrabbit/oak/console/NodeStoreOpener.java
index 7b6e39f..db7bd54 100644
--- oak-run/src/main/java/org/apache/jackrabbit/oak/console/NodeStoreOpener.java
+++ oak-run/src/main/java/org/apache/jackrabbit/oak/console/NodeStoreOpener.java
@@ -33,10 +33,6 @@ import javax.jcr.Session;
 import javax.jcr.SimpleCredentials;
import javax.jcr.SimpleCredentials; import javax.sql.DataSource; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; - import org.apache.jackrabbit.core.data.FileDataStore; import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.jcr.Jcr; @@ -49,9 +45,6 @@ import org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker; import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditorProvider; import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexProvider; import org.apache.jackrabbit.oak.plugins.index.lucene.hybrid.DocumentQueue; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.commit.Observer; import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider; @@ -62,6 +55,10 @@ import com.google.common.util.concurrent.MoreExecutors; import com.mongodb.MongoClientURI; import com.mongodb.MongoURI; +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + /** * A tool to open a node store from command line options */ @@ -74,7 +71,6 @@ public class NodeStoreOpener { .withRequiredArg().ofType(Integer.class).defaultsTo(0); OptionSpec readWriteOption = parser.accepts("read-write", "connect to repository in read-write mode"); OptionSpec fdsPathSpec = parser.accepts("fds-path", "Path to FDS store").withOptionalArg().defaultsTo(""); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSpec help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp(); // RDB specific options @@ -169,19 +165,6 @@ public class NodeStoreOpener { builder.memoryCacheSize(cacheSize * MB); } fixture = new MongoFixture(store); - } else if (options.has(segment)) { - FileStore.Builder fsBuilder = FileStore.builder(new File(nodeStore)) - .withMaxFileSize(256).withDefaultMemoryMapping(); - if (blobStore != null) { - fsBuilder.withBlobStore(blobStore); - } - FileStore store; - if (readOnly) { - store = fsBuilder.buildReadOnly(); - } else { - store = fsBuilder.build(); - } - fixture = new SegmentFixture(store); } else { fixture = SegmentTarFixture.create(new File(nodeStore), readOnly, blobStore); } @@ -206,27 +189,6 @@ public class NodeStoreOpener { } } - @Deprecated - public static class SegmentFixture implements NodeStoreFixture { - private final SegmentStore segmentStore; - private final SegmentNodeStore nodeStore; - - private SegmentFixture(SegmentStore segmentStore) { - this.segmentStore = segmentStore; - this.nodeStore = SegmentNodeStore.builder(segmentStore).build(); - } - - @Override - public NodeStore getStore() { - return nodeStore; - } - - @Override - public void close() throws IOException { - segmentStore.close(); - } - } - public static Session openSession(NodeStore nodeStore) throws RepositoryException { if (nodeStore == null) { return null; diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/Explorer.java oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/Explorer.java index 30cca87..95a1fbc 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/Explorer.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/Explorer.java @@ -59,7 +59,6 @@ public class Explorer { public static void main(String[] args) throws IOException { OptionParser parser = new OptionParser(); - 
OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar");
         OptionSpec skipSizeCheck = parser.accepts("skip-size-check", "Don't compute the size of the records");
         OptionSpec<File> nonOptions = parser.nonOptions().ofType(File.class);
         OptionSet options = parser.parse(args);
@@ -71,13 +70,7 @@
         File path = options.valuesOf(nonOptions).get(0);

-        ExplorerBackend backend;
-
-        if (options.has(segment)) {
-            backend = new SegmentExplorerBackend(path);
-        } else {
-            backend = new SegmentTarExplorerBackend(path);
-        }
+        ExplorerBackend backend = new SegmentTarExplorerBackend(path);

         new Explorer(path, backend, options.has(skipSizeCheck));
     }
diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/SegmentExplorerBackend.java oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/SegmentExplorerBackend.java
deleted file mode 100644
index 999e26a..0000000
--- oak-run/src/main/java/org/apache/jackrabbit/oak/explorer/SegmentExplorerBackend.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.jackrabbit.oak.explorer; - -import static com.google.common.collect.Sets.newHashSet; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; - -import com.google.common.collect.Maps; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper; -import org.apache.jackrabbit.oak.plugins.segment.PCMAnalyser; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStateHelper; -import org.apache.jackrabbit.oak.plugins.segment.SegmentPropertyState; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.state.NodeState; - -class SegmentExplorerBackend implements ExplorerBackend { - - private final File path; - - private FileStore.ReadOnlyStore store; - - private Map> index; - - SegmentExplorerBackend(File path) throws IOException { - this.path = path; - } - - @Override - public void open() throws IOException { - try { - store = FileStore.builder(path).buildReadOnly(); - } catch (InvalidFileStoreVersionException e) { - throw new IllegalStateException(e); - } - - index = store.getTarReaderIndex(); - } - - @Override - public void close() { - store.close(); - store = null; - index = null; - } - - @Override - public List readRevisions() { - return FileStoreHelper.readRevisions(path); - } - - @Override - public Map> getTarReaderIndex() { - return store.getTarReaderIndex(); - } - - @Override - public Map> getTarGraph(String file) throws IOException { - return store.getTarGraph(file); - } - - @Override - public List getTarFiles() { - return FileStoreHelper.getTarFiles(store); - } - - @Override - public void getGcRoots(UUID uuidIn, Map>> links) throws IOException { - FileStoreHelper.getGcRoots(store, uuidIn, links); - } - - @Override - public Set getReferencedSegmentIds() { - Set ids = newHashSet(); - - for (SegmentId id : store.getTracker().getReferencedSegmentIds()) { - ids.add(id.asUUID()); - } - - return ids; - } - - @Override - public NodeState getHead() { - return store.getHead(); - } - - @Override - public NodeState readNodeState(String recordId) { - return new SegmentNodeState(RecordId.fromString(store.getTracker(), recordId)); - } - - @Override - public void setRevision(String revision) { - store.setRevision(revision); - } - - @Override - public boolean isPersisted(NodeState state) { - return state instanceof SegmentNodeState; - } - - @Override - public boolean isPersisted(PropertyState state) { - return state instanceof SegmentPropertyState; - } - - @Override - public String getRecordId(NodeState state) { - if (state instanceof SegmentNodeState) { - return getRecordId((SegmentNodeState) state); - } - - return null; - } - - @Override - public UUID getSegmentId(NodeState state) { - if (state instanceof SegmentNodeState) { - return getSegmentId((SegmentNodeState) state); - } - - return null; - } - - @Override - public String getRecordId(PropertyState state) { - if (state instanceof SegmentPropertyState) { - return getRecordId((SegmentPropertyState) state); - } - - 
return null; - } - - @Override - public UUID getSegmentId(PropertyState state) { - if (state instanceof SegmentPropertyState) { - return getSegmentId((SegmentPropertyState) state); - } - - return null; - } - - @Override - public String getTemplateRecordId(NodeState state) { - if (state instanceof SegmentNodeState) { - return getTemplateRecordId((SegmentNodeState) state); - } - - return null; - } - - @Override - public UUID getTemplateSegmentId(NodeState state) { - if (state instanceof SegmentNodeState) { - return getTemplateSegmentId((SegmentNodeState) state); - } - - return null; - } - - @Override - public String getFile(NodeState state) { - if (state instanceof SegmentNodeState) { - return getFile((SegmentNodeState) state); - } - - return null; - } - - @Override - public String getFile(PropertyState state) { - if (state instanceof SegmentPropertyState) { - return getFile((SegmentPropertyState) state); - } - - return null; - } - - @Override - public String getTemplateFile(NodeState state) { - if (state instanceof SegmentNodeState) { - return getTemplateFile((SegmentNodeState) state); - } - - return null; - } - - @Override - public Map getBulkSegmentIds(Blob blob) { - Map result = Maps.newHashMap(); - - for (SegmentId segmentId : SegmentBlob.getBulkSegmentIds(blob)) { - result.put(segmentId.asUUID(), getFile(segmentId)); - } - - return result; - } - - @Override - public String getPersistedCompactionMapStats() { - return new PCMAnalyser(store).toString(); - } - - @Override - public boolean isExternal(Blob blob) { - if (blob instanceof SegmentBlob) { - return isExternal((SegmentBlob) blob); - } - - return false; - } - - private boolean isExternal(SegmentBlob blob) { - return blob.isExternal(); - } - - private String getRecordId(SegmentNodeState state) { - return state.getRecordId().toString(); - } - - private UUID getSegmentId(SegmentNodeState state) { - return state.getRecordId().getSegmentId().asUUID(); - } - - private String getRecordId(SegmentPropertyState state) { - return state.getRecordId().toString(); - } - - private UUID getSegmentId(SegmentPropertyState state) { - return state.getRecordId().getSegmentId().asUUID(); - } - - private String getTemplateRecordId(SegmentNodeState state) { - RecordId recordId = SegmentNodeStateHelper.getTemplateId(state); - - if (recordId == null) { - return null; - } - - return recordId.toString(); - } - - private UUID getTemplateSegmentId(SegmentNodeState state) { - RecordId recordId = SegmentNodeStateHelper.getTemplateId(state); - - if (recordId == null) { - return null; - } - - return recordId.getSegmentId().asUUID(); - } - - private String getFile(SegmentNodeState state) { - return getFile(state.getRecordId().getSegmentId()); - } - - private String getFile(SegmentPropertyState state) { - return getFile(state.getRecordId().getSegmentId()); - } - - private String getTemplateFile(SegmentNodeState state) { - RecordId recordId = SegmentNodeStateHelper.getTemplateId(state); - - if (recordId == null) { - return null; - } - - return getFile(recordId.getSegmentId()); - } - - private String getFile(SegmentId segmentId) { - for (Entry> path2Uuid : index.entrySet()) { - for (UUID uuid : path2Uuid.getValue()) { - if (uuid.equals(segmentId.asUUID())) { - return new File(path2Uuid.getKey()).getName(); - } - } - } - return null; - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java index e96edf9..72e1ca9 100644 --- 
oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakFixture.java @@ -22,7 +22,6 @@ import java.net.UnknownHostException; import javax.sql.DataSource; -import org.apache.commons.io.FileUtils; import org.apache.jackrabbit.oak.Oak; import org.apache.jackrabbit.oak.plugins.document.DocumentMK; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBBlobStore; @@ -30,11 +29,7 @@ import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions; import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection; -import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState; import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.apache.jackrabbit.oak.stats.StatisticsProvider; @@ -51,12 +46,6 @@ public abstract class OakFixture { public static final String OAK_RDB = "Oak-RDB"; public static final String OAK_RDB_DS = "Oak-RDB-DS"; - @Deprecated - public static final String OAK_TAR = "Oak-Tar"; - - @Deprecated - public static final String OAK_TAR_DS = "Oak-Tar-DS"; - public static final String OAK_SEGMENT_TAR = "Oak-Segment-Tar"; public static final String OAK_SEGMENT_TAR_DS = "Oak-Segment-Tar-DS"; @@ -261,26 +250,12 @@ public abstract class OakFixture { }; } - @Deprecated - public static OakFixture getTar( - final String name, final File base, final int maxFileSizeMB, final int cacheSizeMB, - final boolean memoryMapping, final boolean useBlobStore) { - return getTar(name, base, maxFileSizeMB, cacheSizeMB, memoryMapping, useBlobStore, 0); - } - public static OakFixture getSegmentTar(final String name, final File base, final int maxFileSizeMB, final int cacheSizeMB, final boolean memoryMapping, final boolean useBlobStore) { return getSegmentTar(name, base, maxFileSizeMB, cacheSizeMB, memoryMapping, useBlobStore, 0); } - @Deprecated - public static OakFixture getTar( - final String name, final File base, final int maxFileSizeMB, final int cacheSizeMB, - final boolean memoryMapping, final boolean useBlobStore, int dsCacheInMB) { - return new SegmentFixture(name, base, maxFileSizeMB, cacheSizeMB, memoryMapping, useBlobStore, dsCacheInMB); - } - public static OakFixture getSegmentTar(final String name, final File base, final int maxFileSizeMB, final int cacheSizeMB, final boolean memoryMapping, final boolean useBlobStore, final int dsCacheInMB) { @@ -405,89 +380,6 @@ public abstract class OakFixture { } - @Deprecated - public static class SegmentFixture extends OakFixture { - private FileStore[] stores; - private BlobStoreFixture[] blobStoreFixtures = new BlobStoreFixture[0]; - private final File base; - private final int maxFileSizeMB; - private final int cacheSizeMB; - private final boolean memoryMapping; - private final boolean useBlobStore; - private final int dsCacheSizeInMB; - - public SegmentFixture(String name, File base, int maxFileSizeMB, int cacheSizeMB, - boolean memoryMapping, boolean useBlobStore, int dsCacheSizeInMB) { - super(name); - this.base = base; - this.maxFileSizeMB = maxFileSizeMB; - this.cacheSizeMB = cacheSizeMB; - 
this.memoryMapping = memoryMapping; - this.useBlobStore = useBlobStore; - this.dsCacheSizeInMB = dsCacheSizeInMB; - } - - @Override - public Oak getOak(int clusterId) throws Exception { - FileStore fs = FileStore.builder(base) - .withMaxFileSize(maxFileSizeMB) - .withCacheSize(cacheSizeMB) - .withMemoryMapping(memoryMapping) - .build(); - return newOak(SegmentNodeStore.builder(fs).build()); - } - - @Override - public Oak[] setUpCluster(int n, StatisticsProvider statsProvider) throws Exception { - Oak[] cluster = new Oak[n]; - stores = new FileStore[cluster.length]; - if (useBlobStore) { - blobStoreFixtures = new BlobStoreFixture[cluster.length]; - } - - for (int i = 0; i < cluster.length; i++) { - BlobStore blobStore = null; - if (useBlobStore) { - blobStoreFixtures[i] = - BlobStoreFixture.create(base, true, dsCacheSizeInMB, statsProvider); - blobStore = blobStoreFixtures[i].setUp(); - } - - FileStore.Builder builder = FileStore.builder(new File(base, unique)); - if (blobStore != null) { - builder.withBlobStore(blobStore); - } - stores[i] = builder.withRoot(EmptyNodeState.EMPTY_NODE) - .withStatisticsProvider(statsProvider) - .withMaxFileSize(maxFileSizeMB) - .withCacheSize(cacheSizeMB) - .withMemoryMapping(memoryMapping) - .build(); - cluster[i] = newOak(SegmentNodeStore.builder(stores[i]).build()); - } - return cluster; - } - - @Override - public void tearDownCluster() { - for (SegmentStore store : stores) { - store.close(); - } - for (BlobStoreFixture blobStore : blobStoreFixtures) { - blobStore.tearDown(); - } - FileUtils.deleteQuietly(new File(base, unique)); - } - - public BlobStoreFixture[] getBlobStoreFixtures() { - return blobStoreFixtures; - } - - public FileStore[] getStores() { - return stores; - } - } - static Oak newOak(NodeStore nodeStore) { return new Oak(nodeStore).with(ManagementFactory.getPlatformMBeanServer()); } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java index 34698e6..4233236 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/fixture/OakRepositoryFixture.java @@ -94,21 +94,6 @@ public class OakRepositoryFixture implements RepositoryFixture { dropDBAfterTest, cacheSize, true, base, fdsCacheInMB)); } - @Deprecated - public static RepositoryFixture getTar(File base, int maxFileSizeMB, int cacheSizeMB, - boolean memoryMapping) { - return new OakRepositoryFixture(OakFixture - .getTar(OakFixture.OAK_TAR, base, maxFileSizeMB, cacheSizeMB, memoryMapping, false)); - } - - @Deprecated - public static RepositoryFixture getTarWithBlobStore(File base, int maxFileSizeMB, - int cacheSizeMB, boolean memoryMapping, int dsCacheInMB) { - return new OakRepositoryFixture(OakFixture - .getTar(OakFixture.OAK_TAR_DS, base, maxFileSizeMB, cacheSizeMB, memoryMapping, true, - dsCacheInMB)); - } - public static RepositoryFixture getSegmentTar(File base, int maxFileSizeMB, int cacheSizeMB, boolean memoryMapping) { return new OakRepositoryFixture(OakFixture diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/FileStoreHelper.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/FileStoreHelper.java deleted file mode 100644 index fb3db46..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/FileStoreHelper.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or 
more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.reverseOrder; -import static java.util.Collections.sort; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.LATEST_VERSION; - -import java.io.File; -import java.io.IOException; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; - -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.plugins.segment.file.JournalReader; -import org.apache.jackrabbit.oak.plugins.segment.file.tooling.BasicReadOnlyBlobStore; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -public final class FileStoreHelper { - - public static final String newline = "\n"; - - public static final boolean TAR_STORAGE_MEMORY_MAPPED = Boolean.getBoolean("tar.memoryMapped"); - - public static final int TAR_SEGMENT_CACHE_SIZE = Integer.getInteger("cache", 256); - - private FileStoreHelper() { - } - - /** - * Helper method to determine the segment version of the segment - * containing the current root node state. 
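Note: the readRevisions helper a few lines below guards the JournalReader with a nested try/finally and ends up calling close() twice. Since close() is the only cleanup, try-with-resources expresses the same logic more safely; a sketch against the same (now removed) oak-segment API, assuming JournalReader implements Closeable, which its close() handling suggests:

import static com.google.common.collect.Lists.newArrayList;

import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.jackrabbit.oak.plugins.segment.file.JournalReader;

class ReadRevisionsSketch {

    static List<String> readRevisions(File store) {
        File journal = new File(store, "journal.log");
        if (!journal.exists()) {
            return newArrayList();
        }
        // One close path instead of the finally block plus the redundant
        // inner close() in the deleted helper below.
        try (JournalReader journalReader = new JournalReader(journal)) {
            return newArrayList(journalReader.iterator());
        } catch (IOException e) {
            e.printStackTrace();
            return newArrayList();
        }
    }
}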
- * @param fileStore - * @return - */ - public static SegmentVersion getSegmentVersion(FileStore fileStore) { - return fileStore.getHead().getRecordId().getSegment().getSegmentVersion(); - } - - public static List getTarFiles(FileStore store) { - List files = newArrayList(); - for (String p : store.getTarReaderIndex().keySet()) { - files.add(new File(p).getName()); - } - sort(files, reverseOrder()); - return files; - } - - public static void getGcRoots(FileStore store, UUID uuidIn, - Map>> links) throws IOException { - Deque todos = new ArrayDeque(); - todos.add(uuidIn); - Set visited = newHashSet(); - while (!todos.isEmpty()) { - UUID uuid = todos.remove(); - if (!visited.add(uuid)) { - continue; - } - for (String f : getTarFiles(store)) { - Map> graph = store.getTarGraph(f); - for (Entry> g : graph.entrySet()) { - if (g.getValue() != null && g.getValue().contains(uuid)) { - UUID uuidP = g.getKey(); - if (!todos.contains(uuidP)) { - todos.add(uuidP); - Set> deps = links.get(uuid); - if (deps == null) { - deps = newHashSet(); - links.put(uuid, deps); - } - deps.add(new SimpleImmutableEntry( - uuidP, f)); - } - } - } - } - } - } - - public static void printGcRoots(StringBuilder sb, - Map>> links, UUID uuid, String space, - String inc) { - Set> roots = links.remove(uuid); - if (roots == null || roots.isEmpty()) { - return; - } - // TODO is sorting by file name needed? - for (Entry r : roots) { - sb.append(space + r.getKey() + "[" + r.getValue() + "]"); - sb.append(newline); - printGcRoots(sb, links, r.getKey(), space + inc, inc); - } - } - - public static List readRevisions(File store) { - File journal = new File(store, "journal.log"); - if (!journal.exists()) { - return newArrayList(); - } - - List revs = newArrayList(); - JournalReader journalReader = null; - try { - journalReader = new JournalReader(journal); - try { - revs = newArrayList(journalReader.iterator()); - } finally { - journalReader.close(); - } - } catch (IOException e) { - e.printStackTrace(); - } finally { - try { - if (journalReader != null) { - journalReader.close(); - } - } catch (IOException e) { - } - } - return revs; - } - - public static File isValidFileStoreOrFail(File store) { - checkArgument(isValidFileStore(store), "Invalid FileStore directory " - + store); - return store; - } - - /** - * Checks if the provided directory is a valid FileStore - * - * @return true if the provided directory is a valid FileStore - */ - public static boolean isValidFileStore(File store) { - if (!store.exists()) { - return false; - } - if (!store.isDirectory()) { - return false; - } - // for now the only check is the existence of the journal file - for (String f : store.list()) { - if ("journal.log".equals(f)) { - return true; - } - } - return false; - } - - public static File checkFileStoreVersionOrFail(String path, boolean force) throws IOException, InvalidFileStoreVersionException { - File directory = new File(path); - if (!directory.exists()) { - return directory; - } - FileStore store = openReadOnlyFileStore(directory); - try { - SegmentVersion segmentVersion = getSegmentVersion(store); - if (segmentVersion != LATEST_VERSION) { - if (force) { - System.out - .printf("Segment version mismatch. Found %s, expected %s. Forcing execution.\n", - segmentVersion, LATEST_VERSION); - } else { - throw new RuntimeException( - String.format( - "Segment version mismatch. Found %s, expected %s. 
Aborting.", - segmentVersion, LATEST_VERSION)); - } - } - } finally { - store.close(); - } - return directory; - } - - public static FileStore openFileStore(String directory) throws IOException, InvalidFileStoreVersionException { - return openFileStore(directory, false); - } - - public static FileStore openFileStore(String directory, boolean force) - throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(checkFileStoreVersionOrFail(directory, force)) - .withCacheSize(TAR_SEGMENT_CACHE_SIZE) - .withMemoryMapping(TAR_STORAGE_MEMORY_MAPPED).build(); - } - - public static FileStore openFileStore(String directory, boolean force, - BlobStore blobStore - ) throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(checkFileStoreVersionOrFail(directory, force)) - .withCacheSize(TAR_SEGMENT_CACHE_SIZE) - .withMemoryMapping(TAR_STORAGE_MEMORY_MAPPED) - .withBlobStore(blobStore).build(); - } - - public static ReadOnlyStore openReadOnlyFileStore(File directory) - throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(isValidFileStoreOrFail(directory)) - .withCacheSize(TAR_SEGMENT_CACHE_SIZE) - .withMemoryMapping(TAR_STORAGE_MEMORY_MAPPED) - .buildReadOnly(); - } - - public static ReadOnlyStore openReadOnlyFileStore(File directory, - BlobStore blobStore - ) throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(isValidFileStoreOrFail(directory)) - .withCacheSize(TAR_SEGMENT_CACHE_SIZE) - .withMemoryMapping(TAR_STORAGE_MEMORY_MAPPED) - .withBlobStore(blobStore) - .buildReadOnly(); - } - - public static BlobStore newBasicReadOnlyBlobStore() { - return new BasicReadOnlyBlobStore(); - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PCMAnalyser.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PCMAnalyser.java deleted file mode 100644 index d689d43..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PCMAnalyser.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Queues.newArrayDeque; -import static com.google.common.collect.Sets.newHashSet; -import static org.apache.jackrabbit.oak.plugins.segment.PersistedCompactionMap.PERSISTED_COMPACTION_MAP; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentId.isDataSegmentId; - -import java.io.File; -import java.util.Deque; -import java.util.Formatter; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; - -/** - * RecordUsageAnalyser tailored to extract PersistedCompactionMap history and - * size - */ -public class PCMAnalyser extends RecordUsageAnalyser { - - /** - * Extracts persisted compaction map information. Returns a list of - * compaction map chains - */ - private static List> readPCMHistory(FileStore store) { - List> pcms = newArrayList(); - Deque chain = newArrayDeque(); - - Map> index = store.getTarReaderIndex(); - for (String path : index.keySet()) { - Set segments = index.get(path); - String name = new File(path).getName(); - - for (UUID id : segments) { - if (!isDataSegmentId(id.getLeastSignificantBits())) { - continue; - } - Segment s = readSegment(store, id); - for (int r = 0; r < s.getRootCount(); r++) { - if (s.getRootType(r) == RecordType.VALUE) { - RecordId nodeId = new RecordId(s.getSegmentId(), - s.getRootOffset(r)); - String v = Segment.readString(nodeId); - PCMInfo pcm = parsePCMInfo(v, store, name); - if (pcm != null) { - if (!pcm.sameMap(chain.peekLast())) { - pcms.add(chain); - chain = newArrayDeque(); - } - chain.addLast(pcm); - } - } - } - } - } - if (!chain.isEmpty()) { - pcms.add(chain); - } - return pcms; - } - - /** - * Extracts persisted compaction map information, if available, otherwise - * returs null - */ - private static PCMInfo parsePCMInfo(String mapInfo, FileStore store, - String file) { - if (mapInfo == null || !mapInfo.startsWith(PERSISTED_COMPACTION_MAP)) { - return null; - } - SegmentTracker tracker = store.getTracker(); - int idStartIndex = mapInfo.indexOf("id=") + 3; - int idEndIndex = mapInfo.indexOf(",", idStartIndex); - String id = mapInfo.substring(idStartIndex, idEndIndex); - RecordId rid = null; - try { - rid = RecordId.fromString(tracker, id); - } catch (IllegalArgumentException iae) { - // log a warn? - return null; - } - - int baseStartIndex = mapInfo.indexOf("baseId=") + 7; - String base = mapInfo.substring(baseStartIndex, mapInfo.length() - 1); - RecordId bid = null; - if (!"null".equals(base)) { - try { - bid = RecordId.fromString(tracker, base); - } catch (IllegalArgumentException iae) { - // log a warn? 
- } - } - return new PCMInfo(rid, bid, file); - } - - private static Segment readSegment(FileStore store, UUID id) { - return store.readSegment(new SegmentId(store.getTracker(), id - .getMostSignificantBits(), id.getLeastSignificantBits())); - } - - private final List> pcms; - private final Set errors = newHashSet(); - - public PCMAnalyser(FileStore store) { - pcms = readPCMHistory(store); - for (Deque pcm : pcms) { - try { - onPCM(pcm.getFirst().getId()); - } catch (IllegalStateException ex) { - ex.printStackTrace(); - errors.add(ex.getMessage()); - } - } - } - - private void onPCM(RecordId recordId) { - Segment s = recordId.getSegment(); - MapRecord map = s.readMap(recordId); - parseMap(null, recordId, map); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - @SuppressWarnings("resource") - Formatter formatter = new Formatter(sb); - if (pcms.isEmpty()) { - formatter.format("No persisted compaction map found.%n"); - } else { - formatter.format("Persisted compaction map info:%n"); - for (Deque pcm : pcms) { - formatter.format("%s%n", pcm); - } - formatter.format("Persisted compaction map size:%n"); - sb.append(super.toString()); - - formatter.format("%n"); - for (String e : errors) { - formatter.format("%s%n", e); - } - } - return sb.toString(); - } - - private static class PCMInfo { - - private final RecordId id; - private final RecordId baseId; - private final String file; - - public PCMInfo(RecordId id, RecordId baseId, String file) { - this.id = checkNotNull(id); - this.baseId = baseId; - this.file = file; - } - - @Override - public String toString() { - return id + "[" + file + "]"; - } - - public RecordId getId() { - return id; - } - - public RecordId getBaseId() { - return baseId; - } - - /** - * Determines if the current PCM can be considered as the next link in - * the current compaction map. If provided 'o' is null, then the current - * PCM is the head - */ - public boolean sameMap(PCMInfo o) { - if (o == null) { - return true; - } - return id.equals(o.getBaseId()); - } - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStateHelper.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStateHelper.java deleted file mode 100644 index 4e329e9..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStateHelper.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -public class SegmentNodeStateHelper { - - private SegmentNodeStateHelper() { - - } - - public static RecordId getTemplateId(SegmentNodeState s) { - return s.getTemplateId(); - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreRevisionRecovery.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreRevisionRecovery.java deleted file mode 100644 index 6540ccf..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreRevisionRecovery.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.collect.Maps.newTreeMap; -import static com.google.common.collect.Sets.difference; -import static java.util.Arrays.asList; -import static java.util.Collections.reverseOrder; -import static java.util.Collections.sort; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.readRevisions; - -import java.io.File; -import java.io.FilenameFilter; -import java.io.IOException; -import java.util.List; -import java.util.Map.Entry; -import java.util.Set; -import java.util.SortedMap; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.common.collect.ImmutableSet; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.RecordType; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentVersion; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; - -public class FileStoreRevisionRecovery { - - @SuppressWarnings("deprecation") - public static void main(String[] args) throws Exception { - if (args.length == 0) { - System.out - .println("java -jar oak-run-*.jar tarmkrecovery [--version-v10]"); - System.exit(0); - } - OptionParser parser = new OptionParser(); - OptionSpec help = parser.acceptsAll(asList("h", "?", "help"), - "show help").forHelp(); - - OptionSpec storeO = parser.nonOptions( - "Path to segment store (required)").ofType(File.class); - OptionSpec v10 = parser - .accepts("version-v10", "Use V10 for reading"); - - OptionSet options = parser.parse(args); - - if (options.has(help)) { - parser.printHelpOn(System.out); - 
System.exit(0); - } - - File store = storeO.value(options); - if (store == null) { - parser.printHelpOn(System.out); - System.exit(0); - } - - SegmentVersion version; - if (options.has(v10)) { - version = SegmentVersion.V_10; - System.out.println("Store(V10) " + store); - } else { - version = SegmentVersion.V_11; - System.out.println("Store " + store); - } - - SortedMap candidates = candidateSegments(store); - SortedMap roots = extractRoots(store, candidates, - version); - - for (Entry r : roots.entrySet()) { - System.out.println(r.getValue()); // + " @ " + r.getKey()); - } - } - - private static SortedMap extractRoots(File dir, - SortedMap candidates, final SegmentVersion version) - throws IOException, InvalidFileStoreVersionException { - - ReadOnlyStore store = FileStore.builder(dir).withSegmentVersion(version).buildReadOnly(); - - final SortedMap roots = newTreeMap(reverseOrder()); - - for (Entry c : candidates.entrySet()) { - UUID uid = c.getValue(); - SegmentId id = new SegmentId(store.getTracker(), - uid.getMostSignificantBits(), uid.getLeastSignificantBits()); - try { - Segment s = store.readSegment(id); - for (int r = 0; r < s.getRootCount(); r++) { - if (s.getRootType(r) == RecordType.NODE) { - int offset = s.getRootOffset(r); - RecordId nodeId = new RecordId(s.getSegmentId(), offset); - if (isRoot(nodeId)) { - roots.put(c.getKey() + "." + offset, - nodeId.toString10()); - } - } - } - } catch (SegmentNotFoundException ex) { - System.out.println(ex.getMessage()); - } - } - return roots; - } - - private static Set ROOT_NAMES = ImmutableSet.of("root", - "checkpoints"); - - private static boolean isRoot(RecordId nodeId) { - SegmentNodeState sns = new SegmentNodeState(nodeId); - Set childNames = ImmutableSet.copyOf(sns.getChildNodeNames()); - return sns.getPropertyCount() == 0 && childNames.size() == 2 - && difference(ROOT_NAMES, childNames).isEmpty(); - } - - private static SortedMap candidateSegments(File store) - throws IOException { - - List revs = readRevisions(store); - if (revs.isEmpty()) { - System.out.println("No revisions found."); - return newTreeMap(); - } - String head = revs.iterator().next(); - System.out.println("Current head revision " + head); - final UUID headSegment = extractSegmentId(head); - - List tars = listTars(store); - // - final SortedMap candidates = newTreeMap(reverseOrder()); - - for (final String tar : tars) { - final AtomicLong threshold = new AtomicLong(-1); - TarReader r = TarReader.open(new File(store, tar), true); - - // first identify the offset beyond which we need to include - // segments - r.accept(new TarEntryVisitor() { - @Override - public void visit(long msb, long lsb, File file, int offset, - int size) { - if (msb == headSegment.getMostSignificantBits() - && lsb == headSegment.getLeastSignificantBits()) { - threshold.set(offset); - } - } - }); - r.accept(new TarEntryVisitor() { - @Override - public void visit(long msb, long lsb, File file, int offset, - int size) { - if (offset >= threshold.get() - && SegmentId.isDataSegmentId(lsb)) { - candidates.put(tar + "." 
+ offset, new UUID(msb, lsb)); - } - } - }); - if (threshold.get() >= 0) { - break; - } - } - return candidates; - } - - private static UUID extractSegmentId(String record) throws IOException { - RecordId head = RecordId.fromString(new MemoryStore().getTracker(), - record); - return UUID.fromString(head.getSegmentId().toString()); - } - - private static List listTars(File store) { - List files = asList(store.list(new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name.endsWith(".tar"); - } - })); - sort(files, reverseOrder()); - return files; - } -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/SegmentUtils.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/SegmentUtils.java deleted file mode 100644 index c9f9489..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/SegmentUtils.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.jackrabbit.oak.plugins.tika; - -import java.io.File; -import java.io.IOException; - -import com.google.common.io.Closer; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -class SegmentUtils { - - private SegmentUtils() { - // Prevent instantiation - } - - static NodeStore bootstrap(String path, BlobStore store, Closer closer) throws IOException, InvalidFileStoreVersionException { - return SegmentNodeStore.builder(fileStore(path, store, closer)).build(); - } - - private static FileStore fileStore(String path, BlobStore store, Closer closer) throws IOException, InvalidFileStoreVersionException { - return closer.register(fileStore(path, store)); - } - - private static FileStore fileStore(String path, BlobStore store) throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(new File(path)).withBlobStore(store).build(); - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/TextExtractorMain.java oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/TextExtractorMain.java index 1343b9c..6380421 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/TextExtractorMain.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/plugins/tika/TextExtractorMain.java @@ -32,13 +32,6 @@ import java.util.Map; import java.util.Properties; import java.util.UUID; -import com.google.common.collect.Maps; -import com.google.common.io.Closer; -import com.mongodb.MongoClientURI; -import com.mongodb.MongoURI; -import 
joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.jackrabbit.aws.ext.ds.S3DataStore; @@ -51,13 +44,20 @@ import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreTextWriter; import org.apache.jackrabbit.oak.plugins.document.DocumentMK; import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Maps; +import com.google.common.io.Closer; +import com.mongodb.MongoClientURI; +import com.mongodb.MongoURI; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + public class TextExtractorMain { private static final Logger log = LoggerFactory.getLogger(TextExtractorMain.class); @@ -78,8 +78,6 @@ public class TextExtractorMain { .withRequiredArg() .ofType(String.class); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); - OptionSpec pathSpec = parser .accepts("path", "Path in repository under which the binaries would be searched") .withRequiredArg() @@ -216,7 +214,7 @@ public class TextExtractorMain { checkNotNull(blobStore, "BlobStore found to be null. FileDataStore directory " + "must be specified via %s", fdsDirSpec.options()); checkNotNull(dataFile, "Data file path not provided"); - NodeStore nodeStore = bootStrapNodeStore(src, options.has(segment), blobStore, closer); + NodeStore nodeStore = bootStrapNodeStore(src, blobStore, closer); BinaryResourceProvider brp = new NodeStoreBinaryResourceProvider(nodeStore, blobStore); CSVFileGenerator generator = new CSVFileGenerator(dataFile); generator.generate(brp.getBinaries(path)); @@ -286,7 +284,7 @@ public class TextExtractorMain { return props; } - private static NodeStore bootStrapNodeStore(String src, boolean segment, BlobStore blobStore, Closer closer) throws IOException, InvalidFileStoreVersionException { + private static NodeStore bootStrapNodeStore(String src, BlobStore blobStore, Closer closer) throws IOException { if (src.startsWith(MongoURI.MONGODB_PREFIX)) { MongoClientURI uri = new MongoClientURI(src); if (uri.getDatabase() == null) { @@ -303,22 +301,9 @@ public class TextExtractorMain { return store; } - if (segment) { - return SegmentUtils.bootstrap(src, blobStore, closer); - } - return SegmentTarUtils.bootstrap(src, blobStore, closer); } - private static Closeable asCloseable(final FileStore fs) { - return new Closeable() { - @Override - public void close() throws IOException { - fs.close(); - } - }; - } - private static Closeable asCloseable(final DataStore ds) { return new Closeable() { @Override diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/BackupCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/BackupCommand.java index f1954a5..1f0ce5f 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/BackupCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/BackupCommand.java @@ -21,14 +21,12 @@ import java.io.File; import joptsimple.OptionParser; import joptsimple.OptionSet; -import joptsimple.OptionSpec; class BackupCommand implements 
Command { @Override public void execute(String... args) throws Exception { OptionParser parser = new OptionParser(); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); if (options.nonOptionArguments().size() < 2) { @@ -39,12 +37,7 @@ class BackupCommand implements Command { File source = new File(options.nonOptionArguments().get(0).toString()); File target = new File(options.nonOptionArguments().get(1).toString()); - if (options.has(segment)) { - SegmentUtils.backup(source, target); - } else { - SegmentTarUtils.backup(source, target); - - } + SegmentTarUtils.backup(source, target); } } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckCommand.java index 7f13ae0..a5ae051 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckCommand.java @@ -17,7 +17,7 @@ package org.apache.jackrabbit.oak.run; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.isValidFileStoreOrFail; +import static org.apache.jackrabbit.oak.segment.FileStoreHelper.isValidFileStoreOrFail; import java.io.File; import java.io.IOException; @@ -44,7 +44,6 @@ class CheckCommand implements Command { "notify", "number of seconds between progress notifications") .withRequiredArg().ofType(Long.class).defaultsTo(Long.MAX_VALUE); OptionSpec bin = parser.accepts("bin", "read the content of binary properties"); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); ArgumentAcceptingOptionSpec filter = parser.accepts( "filter", "comma separated content paths to be checked") .withRequiredArg().ofType(String.class).withValuesSeparatedBy(',').defaultsTo("/"); @@ -69,11 +68,7 @@ class CheckCommand implements Command { , "A deep scan of the content tree, traversing every node, will be performed by default."); } - if (options.has(segment)) { - SegmentUtils.check(dir, journalFileName, debugLevel, options.has(bin)); - } else { - SegmentTarUtils.check(dir, journalFileName, debugLevel, options.has(bin), filterPaths, options.has(ioStatistics), out, err); - } + SegmentTarUtils.check(dir, journalFileName, debugLevel, options.has(bin), filterPaths, options.has(ioStatistics), out, err); } private void printUsage(OptionParser parser, PrintWriter err, String... 
messages) throws IOException { diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckpointsCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckpointsCommand.java index 33e3e6c..edfc70f 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckpointsCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/CheckpointsCommand.java @@ -20,23 +20,23 @@ package org.apache.jackrabbit.oak.run; import java.io.File; import java.sql.Timestamp; +import org.apache.jackrabbit.oak.checkpoint.Checkpoints; +import org.apache.jackrabbit.oak.plugins.document.DocumentMK; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; + import com.google.common.io.Closer; import com.mongodb.MongoClient; import com.mongodb.MongoClientURI; import com.mongodb.MongoURI; + import joptsimple.OptionParser; import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.jackrabbit.oak.checkpoint.Checkpoints; -import org.apache.jackrabbit.oak.plugins.document.DocumentMK; -import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; class CheckpointsCommand implements Command { @Override public void execute(String... args) throws Exception { OptionParser parser = new OptionParser(); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); if (options.nonOptionArguments().isEmpty()) { @@ -65,8 +65,6 @@ class CheckpointsCommand implements Command { .getNodeStore(); closer.register(Utils.asCloseable(store)); cps = Checkpoints.onDocumentMK(store); - } else if (options.has(segment)) { - cps = Checkpoints.onSegment(new File(connection), closer); } else { cps = Checkpoints.onSegmentTar(new File(connection), closer); } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/CompactCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/CompactCommand.java index 657f8fc..e122471 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/CompactCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/CompactCommand.java @@ -42,7 +42,6 @@ class CompactCommand implements Command { "Path to segment store (required)").ofType(String.class); OptionSpec forceFlag = parser.accepts( "force", "Force compaction and ignore non matching segment version"); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); String path = directoryArg.value(options); @@ -70,11 +69,7 @@ class CompactCommand implements Command { System.out.println(" -> compacting"); try { - if (options.has(segment)) { - SegmentUtils.compact(directory, force); - } else { - SegmentTarUtils.compact(directory, force); - } + SegmentTarUtils.compact(directory, force); success = true; } catch (Throwable e) { System.out.println("Compaction failure stack trace:"); diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java index 65a85cc..b32e7c7 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/DataStoreCheckCommand.java @@ -16,6 +16,15 @@ */ package org.apache.jackrabbit.oak.run; +import static com.google.common.base.StandardSystemProperty.JAVA_IO_TMPDIR; +import static com.google.common.base.Stopwatch.createStarted; +import static com.google.common.io.Closeables.close; +import static java.util.Arrays.asList; 
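Note: with Checkpoints.onSegment and the SegmentCheckpoints implementation removed (see the CheckpointsCommand hunk above and the deleted oak-run checkpoint class earlier in this patch), segment checkpoints are handled by Checkpoints.onSegmentTar. Tooling that only needs basic cleanup can also stay store-agnostic through the generic NodeStore API; an illustrative sketch, not the Checkpoints implementation, and without the atomic setHead() semantics of the deleted code:

import org.apache.jackrabbit.oak.spi.state.NodeStore;

class CheckpointCleanupSketch {

    // Releases every checkpoint the store reports. Works for SegmentNodeStore
    // and DocumentNodeStore alike; each release is an independent call rather
    // than a single compare-and-swap on the segment head.
    static long releaseAll(NodeStore store) {
        long released = 0;
        for (String checkpoint : store.checkpoints()) {
            if (store.release(checkpoint)) {
                released++;
            }
        }
        return released;
    }
}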
+import static org.apache.commons.io.FileUtils.forceDelete; +import static org.apache.jackrabbit.oak.commons.FileIOUtils.sort; +import static org.apache.jackrabbit.oak.commons.FileIOUtils.writeAsLine; +import static org.apache.jackrabbit.oak.commons.FileIOUtils.writeStrings; + import java.io.BufferedWriter; import java.io.Closeable; import java.io.File; @@ -28,6 +37,16 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.annotation.Nullable; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.FilenameUtils; +import org.apache.jackrabbit.oak.commons.FileIOUtils.FileLineDifferenceIterator; +import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever; +import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; +import org.apache.jackrabbit.oak.plugins.document.DocumentBlobReferenceRetriever; +import org.apache.jackrabbit.oak.plugins.document.DocumentMK; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; +import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; + import com.google.common.base.Charsets; import com.google.common.base.Function; import com.google.common.base.Joiner; @@ -38,32 +57,12 @@ import com.google.common.io.Files; import com.mongodb.MongoClient; import com.mongodb.MongoClientURI; import com.mongodb.MongoURI; + import joptsimple.ArgumentAcceptingOptionSpec; import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.OptionSpecBuilder; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.FilenameUtils; -import org.apache.jackrabbit.oak.commons.FileIOUtils.FileLineDifferenceIterator; -import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; -import org.apache.jackrabbit.oak.plugins.document.DocumentBlobReferenceRetriever; -import org.apache.jackrabbit.oak.plugins.document.DocumentMK; -import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlobReferenceRetriever; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; - -import static com.google.common.base.StandardSystemProperty.JAVA_IO_TMPDIR; -import static com.google.common.base.Stopwatch.createStarted; -import static com.google.common.io.Closeables.close; -import static java.util.Arrays.asList; -import static org.apache.commons.io.FileUtils.forceDelete; -import static org.apache.jackrabbit.oak.commons.FileIOUtils.sort; -import static org.apache.jackrabbit.oak.commons.FileIOUtils.writeAsLine; -import static org.apache.jackrabbit.oak.commons.FileIOUtils.writeStrings; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.openFileStore; /** * Command to check data store consistency and also optionally retrieve ids @@ -94,7 +93,6 @@ public class DataStoreCheckCommand implements Command { // Optional argument to specify the dump path ArgumentAcceptingOptionSpec dump = parser.accepts("dump", "Dump Path") .withRequiredArg().ofType(String.class); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); // Optional argument to specify tracking ArgumentAcceptingOptionSpec track = parser.accepts("track", "Local repository home folder") @@ -139,10 +137,6 @@ public class DataStoreCheckCommand implements Command { closer.register(Utils.asCloseable(nodeStore)); blobStore = (GarbageCollectableBlobStore) 
nodeStore.getBlobStore(); marker = new DocumentBlobReferenceRetriever(nodeStore); - } else if (options.has(segment)) { - FileStore fileStore = openFileStore(source); - closer.register(Utils.asCloseable(fileStore)); - marker = new SegmentBlobReferenceRetriever(fileStore.getTracker()); } else { marker = SegmentTarUtils.newBlobReferenceRetriever(source, closer); } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/DebugCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/DebugCommand.java index 83a044b..4b221a6 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/DebugCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/DebugCommand.java @@ -26,7 +26,6 @@ class DebugCommand implements Command { @Override public void execute(String... args) throws Exception { OptionParser parser = new OptionParser(); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSpec nonOptions = parser.nonOptions().ofType(String.class); OptionSet options = parser.parse(args); @@ -37,11 +36,7 @@ String[] nonOptionsArray = options.valuesOf(nonOptions).toArray(new String[0]); - if (options.has(segment)) { - SegmentUtils.debug(nonOptionsArray); - } else { - SegmentTarUtils.debug(nonOptionsArray); - } + SegmentTarUtils.debug(nonOptionsArray); } } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java index a7de223..f79865c 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreDiffCommand.java @@ -38,7 +38,6 @@ OptionSpec incrementalO = parser.accepts("incremental", "Runs diffs between subsequent revisions in the provided interval"); OptionSpec pathO = parser.accepts("path", "Filter diff by given path").withRequiredArg().ofType(String.class).defaultsTo("/"); OptionSpec ignoreSNFEsO = parser.accepts("ignore-snfes", "Ignores SegmentNotFoundExceptions and continues running the diff (experimental)"); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); if (options.has(help)) { @@ -61,11 +60,7 @@ String path = pathO.value(options); boolean ignoreSNFEs = options.has(ignoreSNFEsO); - if (options.has(segment)) { - SegmentUtils.diff(store, out, listOnly, interval, incremental, path, ignoreSNFEs); - } else { - SegmentTarUtils.diff(store, out, listOnly, interval, incremental, path, ignoreSNFEs); - } + SegmentTarUtils.diff(store, out, listOnly, interval, incremental, path, ignoreSNFEs); } private File defaultOutFile() { diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreRevisionRecoveryCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreRevisionRecoveryCommand.java deleted file mode 100644 index 743de77..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/FileStoreRevisionRecoveryCommand.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.jackrabbit.oak.run; - -import org.apache.jackrabbit.oak.plugins.segment.file.FileStoreRevisionRecovery; - -class FileStoreRevisionRecoveryCommand implements Command { - - @Override - public void execute(String... args) throws Exception { - FileStoreRevisionRecovery.main(args); - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/GraphCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/GraphCommand.java index 2fa62b4..31b9270 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/GraphCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/GraphCommand.java @@ -45,7 +45,6 @@ "pattern", "Regular expression specifying which nodes to include (optional). " + "Ignored when --gc is specified.") .withRequiredArg().ofType(String.class); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); @@ -82,11 +81,7 @@ boolean gcGraph = options.has(gcGraphArg); - if (options.has(segment)) { - SegmentUtils.graph(directory, gcGraph, epoch, regExp, out); - } else { - SegmentTarUtils.graph(directory, gcGraph, epoch, regExp, out); - } + SegmentTarUtils.graph(directory, gcGraph, epoch, regExp, out); } } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/HistoryCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/HistoryCommand.java index 5ce0028..fd2df47 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/HistoryCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/HistoryCommand.java @@ -17,7 +17,7 @@ package org.apache.jackrabbit.oak.run; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.isValidFileStoreOrFail; +import static org.apache.jackrabbit.oak.segment.FileStoreHelper.isValidFileStoreOrFail; import java.io.File; @@ -41,7 +41,6 @@ OptionSpec depthArg = parser.accepts( "depth", "Depth up to which to dump node states").withRequiredArg().ofType(Integer.class) .defaultsTo(0); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); File directory = directoryArg.value(options); @@ -56,11 +55,7 @@ String journalName = journalArg.value(options); File journal = new File(isValidFileStoreOrFail(directory), journalName); - if (options.has(segment)) { - SegmentUtils.history(directory, journal, path, depth); - } else { - SegmentTarUtils.history(directory, journal, path, depth); - } + SegmentTarUtils.history(directory, journal, path, depth); } } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/Mode.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/Mode.java index c59a59f..8cd51b9 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/Mode.java +++
oak-run/src/main/java/org/apache/jackrabbit/oak/run/Mode.java @@ -38,7 +38,6 @@ enum Mode { TIKA("tika", new TikaCommand()), GARBAGE("garbage", new GarbageCommand()), TARMKDIFF("tarmkdiff", new FileStoreDiffCommand()), - TARMKRECOVERY("tarmkrecovery", new FileStoreRevisionRecoveryCommand()), DATASTORECHECK("datastorecheck", new DataStoreCheckCommand()), RESETCLUSTERID("resetclusterid", new ResetClusterIdCommand()), PERSISTENTCACHE("persistentcache", new PersistentCacheCommand()), diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/ResetClusterIdCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/ResetClusterIdCommand.java index adff228..c1294a0 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/ResetClusterIdCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/ResetClusterIdCommand.java @@ -16,27 +16,24 @@ */ package org.apache.jackrabbit.oak.run; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.openFileStore; - -import com.google.common.io.Closer; -import com.mongodb.MongoClient; -import com.mongodb.MongoClientURI; -import com.mongodb.MongoURI; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.Type; import org.apache.jackrabbit.oak.plugins.document.DocumentMK; import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeStore; +import com.google.common.io.Closer; +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.MongoURI; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; + /** * OFFLINE utility to delete the clusterId stored as hidden * property as defined by ClusterRepositoryInfo. @@ -84,7 +81,6 @@ class ResetClusterIdCommand implements Command { @Override public void execute(String... 
args) throws Exception { OptionParser parser = new OptionParser(); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); if (options.nonOptionArguments().isEmpty()) { @@ -105,10 +101,6 @@ class ResetClusterIdCommand implements Command { .getNodeStore(); closer.register(Utils.asCloseable(dns)); store = dns; - } else if (options.has(segment)) { - FileStore fs = openFileStore(source); - closer.register(Utils.asCloseable(fs)); - store = SegmentNodeStore.builder(fs).build(); } else { store = SegmentTarUtils.bootstrapNodeStore(source, closer); } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/RestoreCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/RestoreCommand.java index c059434..b06e384 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/RestoreCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/RestoreCommand.java @@ -21,15 +21,12 @@ import java.io.File; import joptsimple.OptionParser; import joptsimple.OptionSet; -import joptsimple.OptionSpec; -import org.apache.jackrabbit.oak.plugins.backup.FileStoreRestore; class RestoreCommand implements Command { @Override public void execute(String... args) throws Exception { OptionParser parser = new OptionParser(); - OptionSpec segment = parser.accepts("segment", "Use oak-segment instead of oak-segment-tar"); OptionSet options = parser.parse(args); if (options.nonOptionArguments().size() < 2) { @@ -40,12 +37,7 @@ class RestoreCommand implements Command { File target = new File(options.nonOptionArguments().get(0).toString()); File source = new File(options.nonOptionArguments().get(1).toString()); - if (options.has(segment)) { - SegmentUtils.restore(source, target); - } else { - SegmentTarUtils.restore(source, target); - } - + SegmentTarUtils.restore(source, target); } } diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentTarUtils.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentTarUtils.java index a23499b..786c686 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentTarUtils.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentTarUtils.java @@ -17,7 +17,7 @@ package org.apache.jackrabbit.oak.run; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.isValidFileStoreOrFail; +import static org.apache.jackrabbit.oak.segment.FileStoreHelper.isValidFileStoreOrFail; import static org.apache.jackrabbit.oak.segment.SegmentVersion.LATEST_VERSION; import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder; diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentUtils.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentUtils.java deleted file mode 100644 index 77b77f8..0000000 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/SegmentUtils.java +++ /dev/null @@ -1,603 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
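For the ResetClusterIdCommand change above, only the store bootstrap differs; the reset itself stays an ordinary offline NodeStore merge. A hedged sketch of that step follows — the hidden ":clusterConfig" node name is an assumption taken from the ClusterRepositoryInfo reference in the command's javadoc, not something this patch shows:

    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    class ResetClusterIdSketch {
        static void reset(NodeStore store) throws Exception {
            NodeBuilder root = store.getRoot().builder();
            // ":clusterConfig" is assumed; ClusterRepositoryInfo defines the
            // actual hidden node holding the clusterId property.
            root.getChildNode(":clusterConfig").remove();
            store.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        }
    }
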
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.jackrabbit.oak.run; - -import static com.google.common.collect.Lists.reverse; -import static com.google.common.collect.Sets.newHashSet; -import static com.google.common.collect.Sets.newTreeSet; -import static com.google.common.escape.Escapers.builder; -import static javax.jcr.PropertyType.BINARY; -import static javax.jcr.PropertyType.STRING; -import static org.apache.commons.io.FileUtils.byteCountToDisplaySize; -import static org.apache.jackrabbit.oak.commons.PathUtils.elements; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.newBasicReadOnlyBlobStore; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.openFileStore; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.openReadOnlyFileStore; -import static org.apache.jackrabbit.oak.plugins.segment.FileStoreHelper.readRevisions; -import static org.apache.jackrabbit.oak.plugins.segment.RecordId.fromString; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.NODE; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.writeGCGraph; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.writeSegmentGraph; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStateHelper.getTemplateId; -import static org.apache.jackrabbit.oak.plugins.segment.file.tooling.ConsistencyChecker.checkConsistency; -import static org.apache.jackrabbit.oak.run.Utils.asCloseable; -import static org.slf4j.LoggerFactory.getLogger; - -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.io.PrintWriter; -import java.io.RandomAccessFile; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import ch.qos.logback.classic.Level; -import ch.qos.logback.classic.Logger; -import com.google.common.collect.Maps; -import com.google.common.collect.Queues; -import com.google.common.io.Closer; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.commons.PathUtils; -import org.apache.jackrabbit.oak.commons.json.JsopBuilder; -import org.apache.jackrabbit.oak.json.JsopDiff; -import org.apache.jackrabbit.oak.plugins.backup.FileStoreBackup; -import org.apache.jackrabbit.oak.plugins.backup.FileStoreRestore; -import org.apache.jackrabbit.oak.plugins.segment.PCMAnalyser; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.RecordUsageAnalyser; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import 
org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentPropertyState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTracker; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.plugins.segment.file.JournalReader; -import org.apache.jackrabbit.oak.plugins.segment.file.tooling.RevisionHistory; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -class SegmentUtils { - - private final static int MAX_CHAR_DISPLAY = Integer.getInteger("max.char.display", 60); - - private SegmentUtils() { - // Prevent instantiation - } - - static NodeStore bootstrapNodeStore(String path, Closer closer) throws IOException, InvalidFileStoreVersionException { - return SegmentNodeStore.builder(bootstrapFileStore(path, closer)).build(); - } - - static void backup(File source, File target) throws IOException { - Closer closer = Closer.create(); - try { - FileStore fs; - if (FileStoreBackup.USE_FAKE_BLOBSTORE) { - fs = openReadOnlyFileStore(source, newBasicReadOnlyBlobStore()); - } else { - fs = openReadOnlyFileStore(source); - } - closer.register(asCloseable(fs)); - NodeStore store = SegmentNodeStore.builder(fs).build(); - FileStoreBackup.backup(store, target); - } catch (Throwable e) { - throw closer.rethrow(e); - } finally { - closer.close(); - } - } - - static void restore(File source, File target) throws IOException, InvalidFileStoreVersionException { - FileStoreRestore.restore(source, target); - } - - static void debug(String... args) throws IOException, InvalidFileStoreVersionException { - File file = new File(args[0]); - System.out.println("Debug " + file); - FileStore store = openReadOnlyFileStore(file); - try { - if (args.length == 1) { - debugFileStore(store); - } else { - if (args[1].endsWith(".tar")) { - debugTarFile(store, args); - } else { - debugSegment(store, args); - } - } - } finally { - store.close(); - } - } - - static void graph(File path, boolean gcGraph, Date epoch, String regex, OutputStream out) throws Exception { - System.out.println("Opening file store at " + path); - FileStore.ReadOnlyStore fileStore = openReadOnlyFileStore(path); - if (gcGraph) { - writeGCGraph(fileStore, out); - } else { - writeSegmentGraph(fileStore, out, epoch, regex); - } - } - - static void history(File directory, File journal, String path, int depth) throws IOException, InvalidFileStoreVersionException { - Iterable history = new RevisionHistory(directory).getHistory(journal, path); - for (RevisionHistory.HistoryElement historyElement : history) { - System.out.println(historyElement.toString(depth)); - } - } - - static void check(File dir, String journalFileName, long debugLevel, boolean checkBin) throws IOException, InvalidFileStoreVersionException { - checkConsistency(dir, journalFileName, true, debugLevel, checkBin ? 
-1L : 0L); - } - - static void compact(File directory, boolean force) throws IOException, InvalidFileStoreVersionException { - FileStore store = openFileStore(directory.getAbsolutePath(), force); - try { - boolean persistCM = Boolean.getBoolean("tar.PersistCompactionMap"); - CompactionStrategy compactionStrategy = new CompactionStrategy( - false, CompactionStrategy.CLONE_BINARIES_DEFAULT, - CompactionStrategy.CleanupType.CLEAN_ALL, 0, - CompactionStrategy.MEMORY_THRESHOLD_DEFAULT) { - - @Override - public boolean compacted(Callable setHead) - throws Exception { - // oak-run is doing compaction single-threaded - // hence no guarding needed - go straight ahead - // and call setHead - return setHead.call(); - } - }; - compactionStrategy.setOfflineCompaction(true); - compactionStrategy.setPersistCompactionMap(persistCM); - store.setCompactionStrategy(compactionStrategy); - store.compact(); - } finally { - store.close(); - } - - System.out.println(" -> cleaning up"); - store = openFileStore(directory.getAbsolutePath(), false); - try { - for (File file : store.cleanup()) { - if (!file.exists() || file.delete()) { - System.out.println(" -> removed old file " + file.getName()); - } else { - System.out.println(" -> failed to remove old file " + file.getName()); - } - } - - String head; - File journal = new File(directory, "journal.log"); - JournalReader journalReader = new JournalReader(journal); - try { - head = journalReader.iterator().next() + " root " + System.currentTimeMillis() + "\n"; - } finally { - journalReader.close(); - } - - RandomAccessFile journalFile = new RandomAccessFile(journal, "rw"); - try { - System.out.println(" -> writing new " + journal.getName() + ": " + head); - journalFile.setLength(0); - journalFile.writeBytes(head); - journalFile.getChannel().force(false); - } finally { - journalFile.close(); - } - } finally { - store.close(); - } - } - - static void diff(File store, File out, boolean listOnly, String interval, boolean incremental, String path, boolean ignoreSNFEs) throws IOException, InvalidFileStoreVersionException { - if (listOnly) { - listRevs(store, out); - } else { - diff(store, interval, incremental, out, path, ignoreSNFEs); - } - } - - private static FileStore bootstrapFileStore(String path, Closer closer) throws IOException, InvalidFileStoreVersionException { - return closer.register(bootstrapFileStore(path)); - } - - private static FileStore bootstrapFileStore(String path) throws IOException, InvalidFileStoreVersionException { - return FileStore.builder(new File(path)).build(); - } - - private static void listRevs(File store, File out) throws IOException { - System.out.println("Store " + store); - System.out.println("Writing revisions to " + out); - List revs = readRevisions(store); - if (revs.isEmpty()) { - System.out.println("No revisions found."); - return; - } - PrintWriter pw = new PrintWriter(out); - try { - for (String r : revs) { - pw.println(r); - } - } finally { - pw.close(); - } - } - - private static void diff(File dir, String interval, boolean incremental, File out, String filter, boolean ignoreSNFEs) throws IOException, InvalidFileStoreVersionException { - System.out.println("Store " + dir); - System.out.println("Writing diff to " + out); - String[] tokens = interval.trim().split("\\.\\."); - if (tokens.length != 2) { - System.out.println("Error parsing revision interval '" + interval - + "'."); - return; - } - ReadOnlyStore store = FileStore.builder(dir).withBlobStore(newBasicReadOnlyBlobStore()).buildReadOnly(); - RecordId idL = null; - 
RecordId idR = null; - try { - if (tokens[0].equalsIgnoreCase("head")) { - idL = store.getHead().getRecordId(); - } else { - idL = fromString(store.getTracker(), tokens[0]); - } - if (tokens[1].equalsIgnoreCase("head")) { - idR = store.getHead().getRecordId(); - } else { - idR = fromString(store.getTracker(), tokens[1]); - } - } catch (IllegalArgumentException ex) { - System.out.println("Error parsing revision interval '" + interval + "': " + ex.getMessage()); - ex.printStackTrace(); - return; - } - - long start = System.currentTimeMillis(); - PrintWriter pw = new PrintWriter(out); - try { - if (incremental) { - List revs = readRevisions(dir); - System.out.println("Generating diff between " + idL + " and " + idR + " incrementally. Found " + revs.size() + " revisions."); - - int s = revs.indexOf(idL.toString10()); - int e = revs.indexOf(idR.toString10()); - if (s == -1 || e == -1) { - System.out.println("Unable to match input revisions with FileStore."); - return; - } - List revDiffs = revs.subList(Math.min(s, e), Math.max(s, e) + 1); - if (s > e) { - // reverse list - revDiffs = reverse(revDiffs); - } - if (revDiffs.size() < 2) { - System.out.println("Nothing to diff: " + revDiffs); - return; - } - Iterator revDiffsIt = revDiffs.iterator(); - RecordId idLt = fromString(store.getTracker(), revDiffsIt.next()); - while (revDiffsIt.hasNext()) { - RecordId idRt = fromString(store.getTracker(), revDiffsIt.next()); - boolean good = diff(store, idLt, idRt, filter, pw); - idLt = idRt; - if (!good && !ignoreSNFEs) { - break; - } - } - } else { - System.out.println("Generating diff between " + idL + " and " + idR); - diff(store, idL, idR, filter, pw); - } - } finally { - pw.close(); - } - long dur = System.currentTimeMillis() - start; - System.out.println("Finished in " + dur + " ms."); - } - - private static boolean diff(ReadOnlyStore store, RecordId idL, RecordId idR, String filter, PrintWriter pw) throws IOException { - pw.println("rev " + idL + ".." 
+ idR); - try { - NodeState before = new SegmentNodeState(idL).getChildNode("root"); - NodeState after = new SegmentNodeState(idR).getChildNode("root"); - for (String name : elements(filter)) { - before = before.getChildNode(name); - after = after.getChildNode(name); - } - after.compareAgainstBaseState(before, new PrintingDiff(pw, filter)); - return true; - } catch (SegmentNotFoundException ex) { - System.out.println(ex.getMessage()); - pw.println("#SNFE " + ex.getSegmentId()); - return false; - } - } - - private static void debugFileStore(FileStore store) { - Map> idmap = Maps.newHashMap(); - int dataCount = 0; - long dataSize = 0; - int bulkCount = 0; - long bulkSize = 0; - - ((Logger) getLogger(SegmentTracker.class)).setLevel(Level.OFF); - RecordUsageAnalyser analyser = new RecordUsageAnalyser(); - - for (SegmentId id : store.getSegmentIds()) { - if (id.isDataSegmentId()) { - Segment segment = id.getSegment(); - dataCount++; - dataSize += segment.size(); - idmap.put(id, segment.getReferencedIds()); - analyseSegment(segment, analyser); - } else if (id.isBulkSegmentId()) { - bulkCount++; - bulkSize += id.getSegment().size(); - idmap.put(id, Collections.emptyList()); - } - } - System.out.println("Total size:"); - System.out.format( - "%s in %6d data segments%n", - byteCountToDisplaySize(dataSize), dataCount); - System.out.format( - "%s in %6d bulk segments%n", - byteCountToDisplaySize(bulkSize), bulkCount); - System.out.println(analyser.toString()); - - Set garbage = newHashSet(idmap.keySet()); - Queue queue = Queues.newArrayDeque(); - queue.add(store.getHead().getRecordId().getSegmentId()); - while (!queue.isEmpty()) { - SegmentId id = queue.remove(); - if (garbage.remove(id)) { - queue.addAll(idmap.get(id)); - } - } - dataCount = 0; - dataSize = 0; - bulkCount = 0; - bulkSize = 0; - for (SegmentId id : garbage) { - if (id.isDataSegmentId()) { - dataCount++; - dataSize += id.getSegment().size(); - } else if (id.isBulkSegmentId()) { - bulkCount++; - bulkSize += id.getSegment().size(); - } - } - System.out.format("%nAvailable for garbage collection:%n"); - System.out.format("%s in %6d data segments%n", - byteCountToDisplaySize(dataSize), dataCount); - System.out.format("%s in %6d bulk segments%n", - byteCountToDisplaySize(bulkSize), bulkCount); - System.out.format("%n%s", new PCMAnalyser(store).toString()); - } - - private static void analyseSegment(Segment segment, RecordUsageAnalyser analyser) { - for (int k = 0; k < segment.getRootCount(); k++) { - if (segment.getRootType(k) == NODE) { - RecordId nodeId = new RecordId(segment.getSegmentId(), segment.getRootOffset(k)); - try { - analyser.analyseNode(nodeId); - } catch (Exception e) { - System.err.format("Error while processing node at %s", nodeId); - e.printStackTrace(); - } - } - } - } - - private static void debugTarFile(FileStore store, String[] args) { - File root = new File(args[0]); - for (int i = 1; i < args.length; i++) { - String f = args[i]; - if (!f.endsWith(".tar")) { - System.out.println("skipping " + f); - continue; - } - File tar = new File(root, f); - if (!tar.exists()) { - System.out.println("file doesn't exist, skipping " + f); - continue; - } - System.out.println("Debug file " + tar + "(" + tar.length() + ")"); - Set uuids = new HashSet(); - boolean hasrefs = false; - for (Map.Entry> e : store.getTarReaderIndex() - .entrySet()) { - if (e.getKey().endsWith(f)) { - hasrefs = true; - uuids = e.getValue(); - } - } - if (hasrefs) { - System.out.println("SegmentNodeState references to " + f); - List paths = new ArrayList(); - 
filterNodeStates(uuids, paths, store.getHead(), - "/"); - for (String p : paths) { - System.out.println(" " + p); - } - } else { - System.out.println("No references to " + f); - } - - try { - Map> graph = store.getTarGraph(f); - System.out.println(); - System.out.println("Tar graph:"); - for (Map.Entry> entry : graph.entrySet()) { - System.out.println("" + entry.getKey() + '=' + entry.getValue()); - } - } catch (IOException e) { - System.out.println("Error getting tar graph:"); - } - - } - } - - private static void debugSegment(FileStore store, String[] args) { - Pattern pattern = Pattern - .compile("([0-9a-f-]+)|(([0-9a-f-]+:[0-9a-f]+)(-([0-9a-f-]+:[0-9a-f]+))?)?(/.*)?"); - for (int i = 1; i < args.length; i++) { - Matcher matcher = pattern.matcher(args[i]); - if (!matcher.matches()) { - System.err.println("Unknown argument: " + args[i]); - } else if (matcher.group(1) != null) { - UUID uuid = UUID.fromString(matcher.group(1)); - SegmentId id = store.getTracker().getSegmentId( - uuid.getMostSignificantBits(), - uuid.getLeastSignificantBits()); - System.out.println(id.getSegment()); - } else { - RecordId id1 = store.getHead().getRecordId(); - RecordId id2 = null; - if (matcher.group(2) != null) { - id1 = RecordId.fromString(store.getTracker(), - matcher.group(3)); - if (matcher.group(4) != null) { - id2 = RecordId.fromString(store.getTracker(), - matcher.group(5)); - } - } - String path = "/"; - if (matcher.group(6) != null) { - path = matcher.group(6); - } - - if (id2 == null) { - NodeState node = new SegmentNodeState(id1); - System.out.println("/ (" + id1 + ") -> " + node); - for (String name : PathUtils.elements(path)) { - node = node.getChildNode(name); - RecordId nid = null; - if (node instanceof SegmentNodeState) { - nid = ((SegmentNodeState) node).getRecordId(); - } - System.out.println(" " + name + " (" + nid + ") -> " - + node); - } - } else { - NodeState node1 = new SegmentNodeState(id1); - NodeState node2 = new SegmentNodeState(id2); - for (String name : PathUtils.elements(path)) { - node1 = node1.getChildNode(name); - node2 = node2.getChildNode(name); - } - System.out.println(JsopBuilder.prettyPrint(JsopDiff - .diffToJsop(node1, node2))); - } - } - } - } - - private static String displayString(String value) { - if (MAX_CHAR_DISPLAY > 0 && value.length() > MAX_CHAR_DISPLAY) { - value = value.substring(0, MAX_CHAR_DISPLAY) + "... (" - + value.length() + " chars)"; - } - String escaped = builder().setSafeRange(' ', '~') - .addEscape('"', "\\\"").addEscape('\\', "\\\\").build() - .escape(value); - return '"' + escaped + '"'; - } - - private static void filterNodeStates(Set uuids, List paths, SegmentNodeState state, String path) { - Set localPaths = newTreeSet(); - for (PropertyState ps : state.getProperties()) { - if (ps instanceof SegmentPropertyState) { - SegmentPropertyState sps = (SegmentPropertyState) ps; - RecordId recordId = sps.getRecordId(); - UUID id = recordId.getSegmentId().asUUID(); - if (uuids.contains(id)) { - if (ps.getType().tag() == STRING) { - String val = ""; - if (ps.count() > 0) { - // only shows the first value, do we need more? 
- val = displayString(ps.getValue(Type.STRING, 0)); - } - localPaths.add(path + ps.getName() + " = " + val - + " [SegmentPropertyState<" + ps.getType() - + ">@" + recordId + "]"); - } else { - localPaths.add(path + ps + " [SegmentPropertyState<" - + ps.getType() + ">@" + recordId + "]"); - } - - } - if (ps.getType().tag() == BINARY) { - // look for extra segment references - for (int i = 0; i < ps.count(); i++) { - Blob b = ps.getValue(Type.BINARY, i); - for (SegmentId sbid : SegmentBlob.getBulkSegmentIds(b)) { - UUID bid = sbid.asUUID(); - if (!bid.equals(id) && uuids.contains(bid)) { - localPaths.add(path + ps - + " [SegmentPropertyState<" - + ps.getType() + ">@" + recordId + "]"); - } - } - } - } - } - } - - RecordId stateId = state.getRecordId(); - if (uuids.contains(stateId.getSegmentId().asUUID())) { - localPaths.add(path + " [SegmentNodeState@" + stateId + "]"); - } - - RecordId templateId = getTemplateId(state); - if (uuids.contains(templateId.getSegmentId().asUUID())) { - localPaths.add(path + "[Template@" + templateId + "]"); - } - paths.addAll(localPaths); - for (ChildNodeEntry ce : state.getChildNodeEntries()) { - NodeState c = ce.getNodeState(); - if (c instanceof SegmentNodeState) { - filterNodeStates(uuids, paths, (SegmentNodeState) c, - path + ce.getName() + "/"); - } - } - } - -} diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/ServerCommand.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/ServerCommand.java index baccb97..6a88d49 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/ServerCommand.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/ServerCommand.java @@ -116,13 +116,6 @@ class ServerCommand implements Command { host.value(options), port.value(options), db, false, cacheSize * MB); } - - } else if (fix.equals(OakFixture.OAK_TAR)) { - File baseFile = base.value(options); - if (baseFile == null) { - throw new IllegalArgumentException("Required argument base missing."); - } - oakFixture = OakFixture.getTar(OakFixture.OAK_TAR, baseFile, 256, cacheSize, mmap.value(options), false); } else if (fix.equals(OakFixture.OAK_SEGMENT_TAR)) { File baseFile = base.value(options); if (baseFile == null) { diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java index 729f0a8..5fe1762 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/run/Utils.java @@ -31,33 +31,33 @@ import java.util.Properties; import javax.annotation.Nullable; import javax.jcr.RepositoryException; -import com.google.common.collect.Maps; -import com.google.common.io.Closer; -import com.mongodb.MongoClientURI; -import com.mongodb.MongoURI; -import joptsimple.ArgumentAcceptingOptionSpec; -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; import org.apache.felix.cm.file.ConfigurationHandler; import org.apache.jackrabbit.core.data.DataStore; import org.apache.jackrabbit.core.data.DataStoreException; +import org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore; -import org.apache.jackrabbit.oak.blob.cloud.aws.s3.SharedS3DataStore; import org.apache.jackrabbit.oak.plugins.document.DocumentMK; import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; import 
org.apache.jackrabbit.oak.plugins.document.util.MongoConnection; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; import org.apache.jackrabbit.oak.spi.state.NodeStore; +import com.google.common.collect.Maps; +import com.google.common.io.Closer; +import com.mongodb.MongoClientURI; +import com.mongodb.MongoURI; + +import joptsimple.ArgumentAcceptingOptionSpec; +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + class Utils { private static final long MB = 1024 * 1024; - public static NodeStore bootstrapNodeStore(String[] args, Closer closer, String h) throws IOException, InvalidFileStoreVersionException { + public static NodeStore bootstrapNodeStore(String[] args, Closer closer, String h) throws IOException { //TODO add support for other NodeStore flags OptionParser parser = new OptionParser(); OptionSpec clusterId = parser @@ -68,7 +68,6 @@ class Utils { OptionSpec cacheSizeSpec = parser. accepts("cacheSize", "cache size").withRequiredArg(). ofType(Integer.class).defaultsTo(0); - OptionSpec segmentTar = parser.accepts("segment-tar", "Use oak-segment-tar instead of oak-segment"); OptionSpec help = parser.acceptsAll(asList("h", "?", "help"), "show help").forHelp(); OptionSpec nonOption = parser @@ -114,11 +113,7 @@ class Utils { return store; } - if (options.has(segmentTar)) { - return SegmentTarUtils.bootstrapNodeStore(src, closer); - } - - return SegmentUtils.bootstrapNodeStore(src, closer); + return SegmentTarUtils.bootstrapNodeStore(src, closer); } @Nullable @@ -159,16 +154,6 @@ class Utils { return blobStore; } - static Closeable asCloseable(final FileStore fs) { - return new Closeable() { - - @Override - public void close() throws IOException { - fs.close(); - } - }; - } - static Closeable asCloseable(final DocumentNodeStore dns) { return new Closeable() { diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/scalability/ScalabilityRunner.java oak-run/src/main/java/org/apache/jackrabbit/oak/scalability/ScalabilityRunner.java index ba64168..0063af1 100644 --- oak-run/src/main/java/org/apache/jackrabbit/oak/scalability/ScalabilityRunner.java +++ oak-run/src/main/java/org/apache/jackrabbit/oak/scalability/ScalabilityRunner.java @@ -28,16 +28,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import com.google.common.base.Charsets; -import com.google.common.base.Splitter; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; - -import joptsimple.OptionParser; -import joptsimple.OptionSet; -import joptsimple.OptionSpec; - import org.apache.commons.io.FileUtils; import org.apache.jackrabbit.oak.benchmark.CSVResultGenerator; import org.apache.jackrabbit.oak.benchmark.util.Date; @@ -69,6 +59,16 @@ import org.apache.jackrabbit.oak.scalability.suites.ScalabilityBlobSearchSuite; import org.apache.jackrabbit.oak.scalability.suites.ScalabilityNodeRelationshipSuite; import org.apache.jackrabbit.oak.scalability.suites.ScalabilityNodeSuite; +import com.google.common.base.Charsets; +import com.google.common.base.Splitter; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; + /** * Main class for running scalability/longevity tests. 
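The Utils.bootstrapNodeStore change above drops the --segment-tar flag because segment-tar is now the only segment backend oak-run knows. What SegmentTarUtils.bootstrapNodeStore must roughly do can be inferred from the builder APIs this patch migrates to elsewhere; the sketch below rests on that assumption rather than on the verbatim implementation:

    import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    class SegmentTarBootstrapSketch {
        static NodeStore bootstrap(String path) throws Exception {
            // The FileStore must stay open for the NodeStore's lifetime,
            // which is why the real code registers it with a Closer.
            FileStore fs = fileStoreBuilder(new File(path)).build();
            return SegmentNodeStoreBuilders.builder(fs).build();
        }
    }
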
* @@ -141,10 +141,6 @@ public class ScalabilityRunner { host.value(options), port.value(options), dbName.value(options), dropDBAfterTest.value(options), cacheSize * MB), - OakRepositoryFixture.getTar( - base.value(options), 256, cacheSize, mmap.value(options)), - OakRepositoryFixture.getTarWithBlobStore(base.value(options), 256, cacheSize, - mmap.value(options), fdsCache.value(options)), OakRepositoryFixture.getSegmentTar( base.value(options), 256, cacheSize, mmap.value(options)), OakRepositoryFixture.getSegmentTarWithBlobStore(base.value(options), 256, cacheSize, diff --git oak-run/src/main/java/org/apache/jackrabbit/oak/segment/FileStoreHelper.java oak-run/src/main/java/org/apache/jackrabbit/oak/segment/FileStoreHelper.java new file mode 100644 index 0000000..e890d68 --- /dev/null +++ oak-run/src/main/java/org/apache/jackrabbit/oak/segment/FileStoreHelper.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.jackrabbit.oak.segment; + +import static com.google.common.base.Preconditions.checkArgument; + +import java.io.File; + +public class FileStoreHelper { + + public static File isValidFileStoreOrFail(File store) { + checkArgument(isValidFileStore(store), "Invalid FileStore directory " + + store); + return store; + } + + /** + * Checks if the provided directory is a valid FileStore + * + * @return true if the provided directory is a valid FileStore + */ + public static boolean isValidFileStore(File store) { + if (!store.exists()) { + return false; + } + if (!store.isDirectory()) { + return false; + } + // for now the only check is the existence of the journal file + for (String f : store.list()) { + if ("journal.log".equals(f)) { + return true; + } + } + return false; + } +} diff --git oak-segment/pom.xml oak-segment/pom.xml deleted file mode 100644 index e789c07..0000000 --- oak-segment/pom.xml +++ /dev/null @@ -1,230 +0,0 @@ - - - - - - 4.0.0 - - - org.apache.jackrabbit - oak-parent - 1.8-SNAPSHOT - ../oak-parent/pom.xml - - - oak-segment - bundle - - Oak Segment - - - - - org.apache.felix - maven-bundle-plugin - - - - org.apache.jackrabbit.oak.plugins.backup, - org.apache.jackrabbit.oak.plugins.segment, - org.apache.jackrabbit.oak.plugins.segment.http, - org.apache.jackrabbit.oak.plugins.segment.file, - - - - - - org.apache.felix - maven-scr-plugin - - - - - - - - - - org.apache.jackrabbit - oak-commons - ${project.version} - provided - - - org.apache.jackrabbit - oak-blob - ${project.version} - provided - - - org.apache.jackrabbit - oak-blob-cloud - ${project.version} - provided - - - org.apache.jackrabbit - oak-core - ${project.version} - provided - - - - - - javax.jcr - jcr - 2.0 - provided - - - org.apache.jackrabbit - jackrabbit-jcr-commons - ${jackrabbit.version} - provided - - - 
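A short usage note for the new oak-run FileStoreHelper added above: isValidFileStoreOrFail is a cheap sanity check (the directory exists and contains a journal.log), not a full integrity check, and it reports failure via checkArgument's IllegalArgumentException. A minimal sketch, with an illustrative path argument:

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.FileStoreHelper;

    class ValidateStoreSketch {
        public static void main(String[] args) {
            // Throws IllegalArgumentException unless args[0] is a directory
            // containing a journal.log file.
            File store = FileStoreHelper.isValidFileStoreOrFail(new File(args[0]));
            System.out.println("Looks like a segment store: " + store);
        }
    }
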
org.apache.jackrabbit - jackrabbit-api - ${jackrabbit.version} - provided - - - org.apache.jackrabbit - jackrabbit-data - ${jackrabbit.version} - provided - - - - - - org.slf4j - slf4j-api - - - commons-io - commons-io - - - com.google.guava - guava - - - com.google.code.findbugs - jsr305 - - - org.osgi - org.osgi.core - provided - - - org.osgi - org.osgi.compendium - provided - - - biz.aQute.bnd - bndlib - provided - - - org.apache.felix - org.apache.felix.scr.annotations - provided - - - - - - org.apache.jackrabbit - oak-core - ${project.version} - test-jar - test - - - org.apache.jackrabbit - oak-commons - ${project.version} - test-jar - test - - - - - - junit - junit - test - - - org.apache.sling - org.apache.sling.testing.osgi-mock - test - - - commons-lang - commons-lang - test - - - org.easymock - easymock - test - - - org.apache.commons - commons-math3 - test - - - org.slf4j - jul-to-slf4j - test - - - ch.qos.logback - logback-classic - test - - - junit-addons - junit-addons - 1.4 - test - - - xerces - xercesImpl - - - xerces - xmlParserAPIs - - - - - org.mockito - mockito-core - 1.10.19 - test - - - \ No newline at end of file diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackup.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackup.java deleted file mode 100644 index ec86016..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackup.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.backup; - -import static com.google.common.base.Preconditions.checkArgument; - -import java.io.File; -import java.io.IOException; - -import org.apache.jackrabbit.oak.plugins.segment.Compactor; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.plugins.segment.file.tooling.BasicReadOnlyBlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Stopwatch; - -@Deprecated -public class FileStoreBackup { - - private static final Logger log = LoggerFactory - .getLogger(FileStoreBackup.class); - - @Deprecated - public static boolean USE_FAKE_BLOBSTORE = Boolean.getBoolean("oak.backup.UseFakeBlobStore"); - - @Deprecated - public static void backup(NodeStore store, File destination) throws IOException, InvalidFileStoreVersionException { - checkArgument(store instanceof SegmentNodeStore); - Stopwatch watch = Stopwatch.createStarted(); - NodeState current = ((SegmentNodeStore) store).getSuperRoot(); - FileStore.Builder builder = FileStore.builder(destination) - .withDefaultMemoryMapping(); - if (USE_FAKE_BLOBSTORE) { - builder.withBlobStore(new BasicReadOnlyBlobStore()); - } - FileStore backup = builder.build(); - try { - SegmentNodeState state = backup.getHead(); - Compactor compactor = new Compactor(backup.getTracker()); - compactor.setDeepCheckLargeBinaries(true); - compactor.setContentEqualityCheck(true); - SegmentNodeState after = compactor.compact(state, current, state); - backup.setHead(state, after); - } finally { - backup.close(); - } - watch.stop(); - log.info("Backup finished in {}.", watch); - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupRestore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupRestore.java deleted file mode 100644 index ad780c7..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupRestore.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.backup; - -import static com.google.common.base.Preconditions.checkNotNull; -import static java.lang.System.nanoTime; -import static org.apache.jackrabbit.oak.management.ManagementOperation.done; -import static org.apache.jackrabbit.oak.management.ManagementOperation.newManagementOperation; -import static org.apache.jackrabbit.oak.management.ManagementOperation.Status.formatTime; -import static org.apache.jackrabbit.oak.plugins.backup.FileStoreBackup.backup; -import static org.apache.jackrabbit.oak.plugins.backup.FileStoreRestore.restore; - -import java.io.File; -import java.util.concurrent.Callable; -import java.util.concurrent.Executor; - -import javax.annotation.Nonnull; -import javax.management.openmbean.CompositeData; - -import org.apache.jackrabbit.oak.api.jmx.FileStoreBackupRestoreMBean; -import org.apache.jackrabbit.oak.management.ManagementOperation; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -/** - * Default implementation of {@link FileStoreBackupRestoreMBean} based on a file. - */ -@Deprecated -public class FileStoreBackupRestore implements FileStoreBackupRestoreMBean { - - @Deprecated - public static final String BACKUP_OP_NAME = "Backup"; - - @Deprecated - public static final String RESTORE_OP_NAME = "Restore"; - - private final NodeStore store; - private final File file; - private final Executor executor; - - private ManagementOperation backupOp = done(BACKUP_OP_NAME, ""); - private ManagementOperation restoreOp = done(RESTORE_OP_NAME, ""); - - /** - * @param store store to back up from or restore to - * @param file file to back up to or restore from - * @param executor executor for running the back up or restore operation - */ - @Deprecated - public FileStoreBackupRestore( - @Nonnull NodeStore store, - @Nonnull File file, - @Nonnull Executor executor) { - this.store = checkNotNull(store); - this.file = checkNotNull(file); - this.executor = checkNotNull(executor); - } - - @Override - @Deprecated - public synchronized CompositeData startBackup() { - if (backupOp.isDone()) { - backupOp = newManagementOperation("Backup", new Callable() { - @Override - public String call() throws Exception { - long t0 = nanoTime(); - backup(store, file); - return "Backup completed in " + formatTime(nanoTime() - t0); - } - }); - executor.execute(backupOp); - } - return getBackupStatus(); - } - - @Override - @Deprecated - public synchronized CompositeData getBackupStatus() { - return backupOp.getStatus().toCompositeData(); - } - - @Override - @Deprecated - public synchronized CompositeData startRestore() { - if (restoreOp.isDone()) { - restoreOp = newManagementOperation("Restore", new Callable() { - @Override - public String call() throws Exception { - long t0 = nanoTime(); - restore(file, store); - return "Restore completed in " + formatTime(nanoTime() - t0); - } - }); - executor.execute(restoreOp); - } - return getRestoreStatus(); - } - - @Override - @Deprecated - public synchronized CompositeData getRestoreStatus() { - return restoreOp.getStatus().toCompositeData(); - } - - @Override - @Deprecated - public String checkpoint(long lifetime) { - return store.checkpoint(lifetime); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreRestore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreRestore.java deleted file mode 100644 index ec2c16b..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreRestore.java +++ /dev/null @@ -1,87 
+0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.backup; - -import java.io.File; -import java.io.IOException; - -import com.google.common.base.Stopwatch; -import org.apache.jackrabbit.oak.plugins.segment.Compactor; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public class FileStoreRestore { - - private static final Logger log = LoggerFactory - .getLogger(FileStoreRestore.class); - - static int MAX_FILE_SIZE = 256; - - private static final String JOURNAL_FILE_NAME = "journal.log"; - - @Deprecated - public static void restore(File source, File destination) throws IOException, InvalidFileStoreVersionException { - if (!validFileStore(source)) { - throw new IOException("Folder " + source - + " is not a valid FileStore directory"); - } - - FileStore restore = FileStore.builder(source).buildReadOnly(); - Stopwatch watch = Stopwatch.createStarted(); - - FileStore store = FileStore.builder(destination).build(); - SegmentNodeState current = store.getHead(); - try { - Compactor compactor = new Compactor(store.getTracker()); - compactor.setDeepCheckLargeBinaries(true); - SegmentNodeState after = compactor.compact(current, - restore.getHead(), current); - store.setHead(current, after); - } finally { - restore.close(); - store.close(); - } - watch.stop(); - log.info("Restore finished in {}.", watch); - } - - @Deprecated - public static void restore(File source, NodeStore store) { - log.warn("Restore not available as an online operation."); - } - - @Deprecated - private static boolean validFileStore(File source) { - if (source == null || !source.isDirectory()) { - return false; - } - for (String f : source.list()) { - if (JOURNAL_FILE_NAME.equals(f)) { - return true; - } - } - return false; - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/BlockRecord.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/BlockRecord.java deleted file mode 100644 index 8a8ebeb..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/BlockRecord.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkElementIndex; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkPositionIndexes; - -/** - * A record of type "BLOCK". - */ -class BlockRecord extends Record { - - private final int size; - - BlockRecord(RecordId id, int size) { - super(id); - this.size = size; - } - - /** - * Reads bytes from this block. Up to the given number of bytes are - * read starting from the given position within this block. The number - * of bytes read is returned. - * - * @param position position within this block - * @param buffer target buffer - * @param offset offset within the target buffer - * @param length maximum number of bytes to read - * @return number of bytes that could be read - */ - public int read(int position, byte[] buffer, int offset, int length) { - checkElementIndex(position, size); - checkNotNull(buffer); - checkPositionIndexes(offset, offset + length, buffer.length); - - if (position + length > size) { - length = size - position; - } - if (length > 0) { - getSegment().readBytes(getOffset(position), buffer, offset, length); - } - return length; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java deleted file mode 100644 index 1e4c740..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiff.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import com.google.common.base.Supplier; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; - -/** - * A {@code NodeStateDiff} that cancels itself when a condition occurs. The - * condition is represented by an externally provided instance of {@code - * Supplier}. 
If the {@code Supplier} returns {@code true}, the diffing process - * will be canceled at the first possible occasion. - */ -class CancelableDiff implements NodeStateDiff { - - private final NodeStateDiff delegate; - - private final Supplier canceled; - - public CancelableDiff(NodeStateDiff delegate, Supplier canceled) { - this.delegate = delegate; - this.canceled = canceled; - } - - @Override - public final boolean propertyAdded(PropertyState after) { - if (canceled.get()) { - return false; - } - - return delegate.propertyAdded(after); - } - - @Override - public final boolean propertyChanged(PropertyState before, PropertyState after) { - if (canceled.get()) { - return false; - } - - return delegate.propertyChanged(before, after); - } - - @Override - public final boolean propertyDeleted(PropertyState before) { - if (canceled.get()) { - return false; - } - - return delegate.propertyDeleted(before); - } - - @Override - public final boolean childNodeAdded(String name, NodeState after) { - if (canceled.get()) { - return false; - } - - return delegate.childNodeAdded(name, after); - } - - @Override - public final boolean childNodeChanged(String name, NodeState before, NodeState after) { - if (canceled.get()) { - return false; - } - - return delegate.childNodeChanged(name, before, after); - } - - @Override - public final boolean childNodeDeleted(String name, NodeState before) { - if (canceled.get()) { - return false; - } - - return delegate.childNodeDeleted(name, before); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMap.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMap.java deleted file mode 100644 index 6e141d4..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMap.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Lists.newArrayList; - -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -/** - * A {@code CompactionMap} is a composite of multiple {@link PartialCompactionMap} - * instances. Operations performed on this map are delegated back to the individual - * maps. - */ -@Deprecated -public class CompactionMap { - - /** - * An empty map. - */ - @Deprecated - public static final CompactionMap EMPTY = - new CompactionMap(Collections.emptyList(), 0); - - private final List maps; - - /** - * Generation represents the number of compaction cycles since the system - * came online. 
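The CancelableDiff just removed simply short-circuits each callback once the supplier flips; a minimal sketch of the intended wiring, assuming a caller-owned flag and package-local access (the class is package-private):

    // Illustrative wiring for the deleted CancelableDiff: the comparison
    // aborts as soon as the externally controlled flag turns true.
    final AtomicBoolean stop = new AtomicBoolean();   // hypothetical flag
    Supplier<Boolean> canceled = new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            return stop.get();
        }
    };
    boolean completed = after.compareAgainstBaseState(
            before, new CancelableDiff(innerDiff, canceled)); // 'innerDiff' is hypothetical
    // 'completed' is false when the diff was canceled midway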
This is not persisted so it will be reset to 0 on each - * restart - */ - private final int generation; - - private CompactionMap(@Nonnull List maps, int generation) { - this.maps = maps; - this.generation = generation; - } - - /** - * Checks whether the record with the given {@code before} identifier was - * compacted to a new record with the given {@code after} identifier. - * - * @param before before record identifier - * @param after after record identifier - * @return whether {@code before} was compacted to {@code after} - */ - @Deprecated - public boolean wasCompactedTo(@Nonnull RecordId before, @Nonnull RecordId after) { - for (PartialCompactionMap map : maps) { - if (map.wasCompactedTo(before, after)) { - return true; - } - } - return false; - } - - /** - * Checks whether content in the segment with the given identifier was - * compacted to new segments. - * - * @param id segment identifier - * @return whether the identified segment was compacted - */ - @Deprecated - public boolean wasCompacted(@Nonnull UUID id) { - for (PartialCompactionMap map : maps) { - if (map.wasCompacted(id)) { - return true; - } - } - return false; - } - - /** - * Retrieve the record id {@code before} maps to or {@code null} - * if no such id exists. - * @param before before record id - * @return after record id or {@code null} - */ - @CheckForNull - @Deprecated - public RecordId get(@Nonnull RecordId before) { - for (PartialCompactionMap map : maps) { - RecordId after = map.get(before); - if (after != null) { - return after; - } - } - return null; - } - - /** - * Remove all keys from this map where {@code keys.contains(key.asUUID())}. - * @param uuids uuids of the keys to remove - */ - @Deprecated - public void remove(@Nonnull Set uuids) { - for (PartialCompactionMap map : maps) { - map.remove(uuids); - } - } - - /** - * Create a new {@code CompactionMap} containing all maps - * of this instances and additional the passed map {@code head}. - * @param head - * @return a new {@code CompactionMap} instance - */ - @Nonnull - @Deprecated - public CompactionMap cons(@Nonnull PartialCompactionMap head) { - List maps = newArrayList(head); - for (PartialCompactionMap map : this.maps) { - if (!map.isEmpty()) { - maps.add(map); - } - } - return new CompactionMap(maps, generation + 1); - } - - /** - * Java's lacking libraries... - * @param longs - * @return sum of the passed {@code longs} - */ - @Deprecated - public static long sum(long[] longs) { - long sum = 0; - for (long x : longs) { - sum += x; - } - return sum; - } - - /** - * The depth of the compaction map is the number of partial compaction maps - * this map consists of. - * - * @return the depth of this compaction map - * @see #cons(PartialCompactionMap) - */ - @Deprecated - public int getDepth() { - return maps.size(); - } - - @Deprecated - public int getGeneration() { - return generation; - } - - /** - * The weight of the compaction map is its memory consumption bytes - * @return Estimated weight of the compaction map - */ - @Deprecated - public long[] getEstimatedWeights() { - long[] weights = new long[maps.size()]; - int c = 0; - for (PartialCompactionMap map : maps) { - weights[c++] = map.getEstimatedWeight(); - } - return weights; - } - - /** - * Number of segments referenced by the keys in this map. The returned value might only - * be based on the compressed part of the individual maps. 
- * @return number of segments - */ - @Deprecated - public long[] getSegmentCounts() { - long[] counts = new long[maps.size()]; - int c = 0; - for (PartialCompactionMap map : maps) { - counts[c++] = map.getSegmentCount(); - } - return counts; - } - - /** - * Number of records referenced by the keys in this map. The returned value might only - * be based on the compressed part of the individual maps. - * @return number of records - */ - @Deprecated - public long[] getRecordCounts() { - long[] counts = new long[maps.size()]; - int c = 0; - for (PartialCompactionMap map : maps) { - counts[c++] = map.getRecordCount(); - } - return counts; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java deleted file mode 100644 index 8f46c81..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Compactor.java +++ /dev/null @@ -1,525 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.commons.PathUtils.concat; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; - -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; -import com.google.common.hash.Hashing; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.commons.IOUtils; -import org.apache.jackrabbit.oak.plugins.memory.BinaryPropertyState; -import org.apache.jackrabbit.oak.plugins.memory.MultiBinaryPropertyState; -import org.apache.jackrabbit.oak.plugins.memory.PropertyStates; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.spi.state.ApplyDiff; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Tool for compacting segments. 
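The composite CompactionMap deleted above is immutable: each compaction cycle prepends its partial map through cons(), which also advances the non-persisted generation counter, and lookups consult the newest partial map first. A minimal sketch of that lifecycle, with hypothetical partial maps:

    // Illustrative lifecycle of the deleted composite CompactionMap.
    CompactionMap composite = CompactionMap.EMPTY;  // generation 0
    composite = composite.cons(cycleOneMap);        // generation 1, hypothetical map
    composite = composite.cons(cycleTwoMap);        // generation 2; consulted first
    RecordId compacted = composite.get(before);     // null if 'before' was never compacted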
- */ -@Deprecated -public class Compactor { - - /** Logger instance */ - private static final Logger log = LoggerFactory.getLogger(Compactor.class); - - private static boolean eagerFlush = Boolean - .getBoolean("oak.compaction.eagerFlush"); - - static { - if (eagerFlush) { - log.debug("Eager flush enabled."); - } - } - - /** - * Locks down the RecordId persistence structure - */ - static long[] recordAsKey(RecordId r) { - return new long[] { r.getSegmentId().getMostSignificantBits(), - r.getSegmentId().getLeastSignificantBits(), r.getOffset() }; - } - - private final SegmentTracker tracker; - - private final SegmentWriter writer; - - private final PartialCompactionMap map; - - /** - * Filters nodes that will be included in the compaction map, allowing for - * optimization in case of an offline compaction - */ - private Predicate includeInMap = Predicates.alwaysTrue(); - - private final ProgressTracker progress = new ProgressTracker(); - - /** - * Map from {@link #getBlobKey(Blob) blob keys} to matching compacted - * blob record identifiers. Used to de-duplicate copies of the same - * binary values. - */ - private final Map> binaries = newHashMap(); - - /** - * If the compactor should copy large binaries as streams or just copy the - * refs - */ - private final boolean cloneBinaries; - - /** - * In the case of large inlined binaries, compaction will verify if all - * referenced segments exist in order to determine if a full clone is - * necessary, or just a shallow copy of the RecordId list is enough - * (Used in Backup scenario) - */ - private boolean deepCheckLargeBinaries; - - /** - * Flag to use content equality verification before actually compacting the - * state, on the childNodeChanged diff branch - * (Used in Backup scenario) - */ - private boolean contentEqualityCheck; - - /** - * Allows the cancellation of the compaction process. If this {@code - * Supplier} returns {@code true}, this compactor will cancel compaction and - * return a partial {@code SegmentNodeState} containing the changes - * compacted before the cancellation. 
- */ - private final Supplier cancel; - - @Deprecated - public Compactor(SegmentTracker tracker) { - this(tracker, Suppliers.ofInstance(false)); - } - - @Deprecated - public Compactor(SegmentTracker tracker, Supplier cancel) { - this.tracker = tracker; - this.writer = tracker.getWriter(); - this.map = new InMemoryCompactionMap(tracker); - this.cloneBinaries = false; - this.cancel = cancel; - } - - @Deprecated - public Compactor(SegmentTracker tracker, CompactionStrategy compactionStrategy) { - this(tracker, compactionStrategy, Suppliers.ofInstance(false)); - } - - @Deprecated - public Compactor(SegmentTracker tracker, CompactionStrategy compactionStrategy, Supplier cancel) { - this.tracker = tracker; - String wid = "c-" + (tracker.getCompactionMap().getGeneration() + 1); - this.writer = tracker.createSegmentWriter(wid); - if (compactionStrategy.getPersistCompactionMap()) { - this.map = new PersistedCompactionMap(tracker); - } else { - this.map = new InMemoryCompactionMap(tracker); - } - this.cloneBinaries = compactionStrategy.cloneBinaries(); - if (compactionStrategy.isOfflineCompaction()) { - includeInMap = new OfflineCompactionPredicate(); - } - this.cancel = cancel; - } - - protected SegmentNodeBuilder process(NodeState before, NodeState after, NodeState onto) throws IOException { - SegmentNodeBuilder builder = new SegmentNodeBuilder(writer.writeNode(onto), writer); - new CompactDiff(builder).diff(before, after); - return builder; - } - - /** - * Compact the differences between a {@code before} and a {@code after} - * on top of an {@code onto} state. - * @param before the before state - * @param after the after state - * @param onto the onto state - * @return the compacted state - */ - @Deprecated - public SegmentNodeState compact(NodeState before, NodeState after, NodeState onto) throws IOException { - progress.start(); - SegmentNodeState compacted = process(before, after, onto).getNodeState(); - writer.flush(); - progress.stop(); - return compacted; - } - - @Deprecated - public PartialCompactionMap getCompactionMap() { - map.compress(); - return map; - } - - private class CompactDiff extends ApplyDiff { - private IOException exception; - - /** - * Current processed path, or null if the trace log is not enabled at - * the beginning of the compaction call. 
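The cancellation supplier taken by the constructors above is the same one that later drives CancelableDiff inside compact(); a minimal sketch of a cancelable compaction, assuming an open oak-segment FileStore named 'store':

    // Illustrative only: running the deleted Compactor with a cancellation flag.
    final AtomicBoolean shutdown = new AtomicBoolean();   // hypothetical flag
    Compactor compactor = new Compactor(store.getTracker(),
            new Supplier<Boolean>() {
                @Override
                public Boolean get() {
                    return shutdown.get();
                }
            });
    SegmentNodeState head = store.getHead();
    // compacts the head onto an empty state (EmptyNodeState.EMPTY_NODE);
    // throws IOException on failure
    SegmentNodeState compacted = compactor.compact(EMPTY_NODE, head, EMPTY_NODE);
    store.setHead(head, compacted);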
The null check will also be - * used to verify if a trace log will be needed or not - */ - private final String path; - - CompactDiff(NodeBuilder builder) { - super(builder); - if (log.isTraceEnabled()) { - this.path = "/"; - } else { - this.path = null; - } - } - - private CompactDiff(NodeBuilder builder, String path, String childName) { - super(builder); - if (path != null) { - this.path = concat(path, childName); - } else { - this.path = null; - } - } - - boolean diff(NodeState before, NodeState after) throws IOException { - boolean success = after.compareAgainstBaseState(before, new CancelableDiff(this, cancel)); - if (exception != null) { - throw new IOException(exception); - } - return success; - } - - @Override - public boolean propertyAdded(PropertyState after) { - if (path != null) { - log.trace("propertyAdded {}/{}", path, after.getName()); - } - progress.onProperty(); - return super.propertyAdded(compact(after)); - } - - @Override - public boolean propertyChanged(PropertyState before, PropertyState after) { - if (path != null) { - log.trace("propertyChanged {}/{}", path, after.getName()); - } - progress.onProperty(); - return super.propertyChanged(before, compact(after)); - } - - @Override - public boolean childNodeAdded(String name, NodeState after) { - if (path != null) { - log.trace("childNodeAdded {}/{}", path, name); - } - - RecordId id = null; - if (after instanceof SegmentNodeState) { - id = ((SegmentNodeState) after).getRecordId(); - RecordId compactedId = map.get(id); - if (compactedId != null) { - builder.setChildNode(name, new SegmentNodeState(compactedId)); - return true; - } - } - - progress.onNode(); - try { - NodeBuilder child; - if (eagerFlush) { - child = builder.setChildNode(name); - } else { - child = EMPTY_NODE.builder(); - } - boolean success = new CompactDiff(child, path, name).diff(EMPTY_NODE, after); - if (success) { - SegmentNodeState state = writer.writeNode(child.getNodeState()); - builder.setChildNode(name, state); - if (id != null && includeInMap.apply(after)) { - map.put(id, state.getRecordId()); - } - } - return success; - } catch (IOException e) { - exception = e; - return false; - } - } - - @Override - public boolean childNodeChanged( - String name, NodeState before, NodeState after) { - if (path != null) { - log.trace("childNodeChanged {}/{}", path, name); - } - - RecordId id = null; - if (after instanceof SegmentNodeState) { - id = ((SegmentNodeState) after).getRecordId(); - RecordId compactedId = map.get(id); - if (compactedId != null) { - builder.setChildNode(name, new SegmentNodeState(compactedId)); - return true; - } - } - - if (contentEqualityCheck && before.equals(after)) { - return true; - } - - progress.onNode(); - try { - NodeBuilder child = builder.getChildNode(name); - boolean success = new CompactDiff(child, path, name).diff(before, after); - if (success) { - RecordId compactedId = writer.writeNode(child.getNodeState()).getRecordId(); - if (id != null) { - map.put(id, compactedId); - } - } - return success; - } catch (IOException e) { - exception = e; - return false; - } - } - } - - private PropertyState compact(PropertyState property) { - String name = property.getName(); - Type type = property.getType(); - if (type == BINARY) { - Blob blob = compact(property.getValue(Type.BINARY)); - return BinaryPropertyState.binaryProperty(name, blob); - } else if (type == BINARIES) { - List blobs = new ArrayList(); - for (Blob blob : property.getValue(BINARIES)) { - blobs.add(compact(blob)); - } - return 
MultiBinaryPropertyState.binaryPropertyFromBlob(name, blobs); - } else { - Object value = property.getValue(type); - return PropertyStates.createProperty(name, value, type); - } - } - - /** - * Compacts (and de-duplicates) the given blob. - * - * @param blob blob to be compacted - * @return compacted blob - */ - private Blob compact(Blob blob) { - if (blob instanceof SegmentBlob) { - SegmentBlob sb = (SegmentBlob) blob; - try { - // Check if we've already cloned this specific record - RecordId id = sb.getRecordId(); - RecordId compactedId = map.get(id); - if (compactedId != null) { - return new SegmentBlob(compactedId); - } - - progress.onBinary(); - - // if the blob is inlined or external, just clone it - if (sb.isExternal() || sb.length() < Segment.MEDIUM_LIMIT) { - SegmentBlob clone = sb.clone(writer, false); - map.put(id, clone.getRecordId()); - return clone; - } - - // alternatively look if the exact same binary has been cloned - String key = getBlobKey(blob); - List ids = binaries.get(key); - if (ids != null) { - for (RecordId duplicateId : ids) { - if (new SegmentBlob(duplicateId).equals(sb)) { - map.put(id, duplicateId); - return new SegmentBlob(duplicateId); - } - } - } - - boolean clone = cloneBinaries; - if (deepCheckLargeBinaries) { - clone = clone - || !tracker.getStore().containsSegment( - id.getSegmentId()); - if (!clone) { - for (SegmentId bid : SegmentBlob.getBulkSegmentIds(sb)) { - if (!tracker.getStore().containsSegment(bid)) { - clone = true; - break; - } - } - } - } - - // if not, clone the large blob and keep track of the result - sb = sb.clone(writer, clone); - map.put(id, sb.getRecordId()); - if (ids == null) { - ids = newArrayList(); - binaries.put(key, ids); - } - ids.add(sb.getRecordId()); - - return sb; - } catch (IOException e) { - log.warn("Failed to compact a blob", e); - // fall through - } - } - - // no way to compact this blob, so we'll just keep it as-is - return blob; - } - - private static String getBlobKey(Blob blob) throws IOException { - InputStream stream = blob.getNewStream(); - try { - byte[] buffer = new byte[SegmentWriter.BLOCK_SIZE]; - int n = IOUtils.readFully(stream, buffer, 0, buffer.length); - return blob.length() + ":" + Hashing.sha1().hashBytes(buffer, 0, n); - } finally { - stream.close(); - } - } - - private static class ProgressTracker { - private final long logAt = Long.getLong("compaction-progress-log", - 150000); - - private long start = 0; - - private long nodes = 0; - private long properties = 0; - private long binaries = 0; - - void start() { - nodes = 0; - properties = 0; - binaries = 0; - start = System.currentTimeMillis(); - } - - void onNode() { - if (++nodes % logAt == 0) { - logProgress(start, false); - start = System.currentTimeMillis(); - } - } - - void onProperty() { - properties++; - } - - void onBinary() { - binaries++; - } - - void stop() { - logProgress(start, true); - } - - private void logProgress(long start, boolean done) { - log.debug( - "Compacted {} nodes, {} properties, {} binaries in {} ms.", - nodes, properties, binaries, System.currentTimeMillis() - - start); - if (done) { - log.info( - "Finished compaction: {} nodes, {} properties, {} binaries.", - nodes, properties, binaries); - } - } - } - - private static class OfflineCompactionPredicate implements - Predicate { - - /** - * over 64K in size, node will be included in the compaction map - */ - private static final long offlineThreshold = 65536; - - @Override - public boolean apply(NodeState state) { - if (state.getChildNodeCount(2) > 1) { - return true; - 
} - long count = 0; - for (PropertyState ps : state.getProperties()) { - Type type = ps.getType(); - for (int i = 0; i < ps.count(); i++) { - long size = 0; - if (type == BINARY || type == BINARIES) { - Blob blob = ps.getValue(BINARY, i); - if (blob instanceof SegmentBlob) { - if (!((SegmentBlob) blob).isExternal()) { - size += blob.length(); - } - } else { - size += blob.length(); - } - } else { - size = ps.size(i); - } - count += size; - if (size >= offlineThreshold || count >= offlineThreshold) { - return true; - } - } - } - return false; - } - } - - @Deprecated - public void setDeepCheckLargeBinaries(boolean deepCheckLargeBinaries) { - this.deepCheckLargeBinaries = deepCheckLargeBinaries; - } - - @Deprecated - public void setContentEqualityCheck(boolean contentEqualityCheck) { - this.contentEqualityCheck = contentEqualityCheck; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/InMemoryCompactionMap.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/InMemoryCompactionMap.java deleted file mode 100644 index a8d0e2e..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/InMemoryCompactionMap.java +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newTreeMap; -import static com.google.common.collect.Sets.newTreeSet; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.decode; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.encode; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; - -import javax.annotation.Nonnull; - -/** - * Immutable, space-optimized mapping of compacted record identifiers. - * Used to optimize record equality comparisons across a compaction operation - * without holding references to the {@link SegmentId} instances of the old, - * compacted segments. - *

- * The data structure used by this class consists of four parts:
- * 1. The {@link #recent} map of recently compacted entries is maintained - * while the compaction is in progress and new entries need to be added. - * These entries are periodically compressed into the more - * memory-efficient structure described below.
- * 2. The {@link #msbs} and {@link #lsbs} arrays store the identifiers - * of all old, compacted segments. The identifiers are stored in - * increasing order, with the i'th identifier stored in the - * {@code msbs[i]} and {@code lsbs[i]} slots. Interpolation search - * is used to quickly locate any given identifier.
- * 3. Each compacted segment identifier is associated with a list of - * mapping entries that point from a record offset within that - * segment to the new identifier of the compacted record. The - * {@link #entryIndex} array is used to locate these lists within - * the larger entry arrays described below. The list of entries for - * the i'th identifier consists of entries from {@code entryIndex[i]} - * (inclusive) to {@code entryIndex[i+1]} (exclusive). An extra - * sentinel slot is added at the end of the array to make the above - * rule work also for the last compacted segment identifier.
- * 4. The mapping entries are stored in the {@link #beforeOffsets}, - * {@link #afterSegmentIds} and {@link #afterOffsets} arrays. Once the - * list of entries for a given compacted segment is found, the - * before record offsets are scanned to find a match. If a match is - * found, the corresponding compacted record will be identified by the - * respective after segment identifier and offset.
- *
- * Assuming each compacted segment contains {@code n} compacted records on - * average, the amortized size of each entry in this mapping is about - * {@code 20/n + 8} bytes, assuming compressed pointers. - */ -@Deprecated -public class InMemoryCompactionMap implements PartialCompactionMap { - - /** - * Number of map entries to keep until compressing this map. - */ - private static final int COMPRESS_INTERVAL = Integer.getInteger("compress-interval", 100000); - - private final SegmentTracker tracker; - - private Map recent = newHashMap(); - - private long[] msbs = new long[0]; - private long[] lsbs = new long[0]; - private short[] beforeOffsets = new short[0]; - - private int[] entryIndex = new int[0]; - private short[] afterOffsets = new short[0]; - - private int[] afterSegmentIds = new int[0]; - private long[] afterMsbs = new long[0]; - private long[] afterLsbs = new long[0]; - - InMemoryCompactionMap(@Nonnull SegmentTracker tracker) { - this.tracker = tracker; - } - - @Override - @Deprecated - public boolean wasCompactedTo(@Nonnull RecordId before, @Nonnull RecordId after) { - return after.equals(get(before)); - } - - @Override - @Deprecated - public boolean wasCompacted(@Nonnull UUID id) { - return findEntry(id.getMostSignificantBits(), id.getLeastSignificantBits()) != -1; - } - - @Override - @Deprecated - public RecordId get(@Nonnull RecordId before) { - RecordId after = recent.get(before); - if (after != null) { - return after; - } - - //empty map - if (msbs.length == 0) { - return null; - } - - SegmentId segmentId = before.getSegmentId(); - long msb = segmentId.getMostSignificantBits(); - long lsb = segmentId.getLeastSignificantBits(); - int offset = before.getOffset(); - - int entry = findEntry(msb, lsb); - if (entry != -1) { - int index = entryIndex[entry]; - int limit = entryIndex[entry + 1]; - for (int i = index; i < limit; i++) { - int o = decode(beforeOffsets[i]); - if (o == offset) { - // found it! 
- return new RecordId(asSegmentId(i), decode(afterOffsets[i])); - } else if (o > offset) { - return null; - } - } - } - - return null; - } - - @Nonnull - private SegmentId asSegmentId(int index) { - int idx = afterSegmentIds[index]; - return new SegmentId(tracker, afterMsbs[idx], afterLsbs[idx]); - } - - @Nonnull - private static UUID asUUID(SegmentId id) { - return new UUID(id.getMostSignificantBits(), - id.getLeastSignificantBits()); - } - - @Override - @Deprecated - public void put(@Nonnull RecordId before, @Nonnull RecordId after) { - if (get(before) != null) { - throw new IllegalArgumentException(); - } - recent.put(before, after); - if (recent.size() >= COMPRESS_INTERVAL) { - compress(); - } - } - - @Override - @Deprecated - public void remove(@Nonnull Set uuids) { - compress(uuids); - } - - @Override - @Deprecated - public void compress() { - compress(Collections.emptySet()); - } - - @Override - @Deprecated - public long getSegmentCount() { - return msbs.length; - } - - @Override - @Deprecated - public long getRecordCount() { - return afterOffsets.length; - } - - @Override - @Deprecated - public boolean isEmpty() { - return afterOffsets.length == 0 && recent.isEmpty(); - } - - private void compress(@Nonnull Set removed) { - if (recent.isEmpty() && removed.isEmpty()) { - // no-op - return; - } - - Set uuids = newTreeSet(); - int newSize = 0; - Map> mapping = newTreeMap(); - for (Entry entry : recent.entrySet()) { - RecordId before = entry.getKey(); - - SegmentId id = before.getSegmentId(); - UUID uuid = new UUID( - id.getMostSignificantBits(), - id.getLeastSignificantBits()); - if (uuids.add(uuid) && !removed.contains(uuid)) { - newSize++; - } - - Map map = mapping.get(uuid); - if (map == null) { - map = newTreeMap(); - mapping.put(uuid, map); - } - map.put(before.getOffset(), entry.getValue()); - } - - for (int i = 0; i < msbs.length; i++) { - UUID uuid = new UUID(msbs[i], lsbs[i]); - if (uuids.add(uuid) && !removed.contains(uuid)) { - newSize++; - } - } - - long[] newMsbs = new long[newSize]; - long[] newLsbs = new long[newSize]; - int[] newEntryIndex = new int[newSize + 1]; - - int newEntries = beforeOffsets.length + recent.size(); - short[] newBeforeOffsets = new short[newEntries]; - short[] newAfterOffsets = new short[newEntries]; - - int[] newAfterSegmentIds = new int[newEntries]; - Map newAfterSegments = newHashMap(); - - int newIndex = 0; - int newEntry = 0; - int oldEntry = 0; - for (UUID uuid : uuids) { - long msb = uuid.getMostSignificantBits(); - long lsb = uuid.getLeastSignificantBits(); - - if (removed.contains(uuid)) { - if (oldEntry < msbs.length - && msbs[oldEntry] == msb - && lsbs[oldEntry] == lsb) { - oldEntry++; - } - continue; - } - - // offset -> record - Map newSegment = mapping.get(uuid); - if (newSegment == null) { - newSegment = newTreeMap(); - } - - if (oldEntry < msbs.length - && msbs[oldEntry] == msb - && lsbs[oldEntry] == lsb) { - int index = entryIndex[oldEntry]; - int limit = entryIndex[oldEntry + 1]; - for (int i = index; i < limit; i++) { - newSegment.put(decode(beforeOffsets[i]), new RecordId( - asSegmentId(i), decode(afterOffsets[i]))); - } - oldEntry++; - } - - newMsbs[newEntry] = msb; - newLsbs[newEntry] = lsb; - newEntryIndex[newEntry++] = newIndex; - for (Entry entry : newSegment.entrySet()) { - int key = entry.getKey(); - RecordId id = entry.getValue(); - newBeforeOffsets[newIndex] = encode(key); - newAfterOffsets[newIndex] = encode(id.getOffset()); - - UUID aUUID = asUUID(id.getSegmentId()); - int aSIdx; - if 
(newAfterSegments.containsKey(aUUID)) { - aSIdx = newAfterSegments.get(aUUID); - } else { - aSIdx = newAfterSegments.size(); - newAfterSegments.put(aUUID, aSIdx); - } - newAfterSegmentIds[newIndex] = aSIdx; - - newIndex++; - } - } - - newEntryIndex[newEntry] = newIndex; - - this.msbs = newMsbs; - this.lsbs = newLsbs; - this.entryIndex = newEntryIndex; - - if (newIndex < newBeforeOffsets.length) { - this.beforeOffsets = Arrays.copyOf(newBeforeOffsets, newIndex); - this.afterOffsets = Arrays.copyOf(newAfterOffsets, newIndex); - this.afterSegmentIds = Arrays.copyOf(newAfterSegmentIds, newIndex); - } else { - this.beforeOffsets = newBeforeOffsets; - this.afterOffsets = newAfterOffsets; - this.afterSegmentIds = newAfterSegmentIds; - } - - this.afterMsbs = new long[newAfterSegments.size()]; - this.afterLsbs = new long[newAfterSegments.size()]; - for (Entry entry : newAfterSegments.entrySet()) { - this.afterMsbs[entry.getValue()] = entry.getKey() - .getMostSignificantBits(); - this.afterLsbs[entry.getValue()] = entry.getKey() - .getLeastSignificantBits(); - } - - recent = newHashMap(); - } - - /** - * Finds the given segment identifier (UUID) within the list of - * identifiers of compacted segments tracked by this instance. - * Since the UUIDs are randomly generated and we keep the list - * sorted, we can use interpolation search to achieve - * {@code O(log log n)} lookup performance. - * - * @param msb most significant bits of the UUID - * @param lsb least significant bits of the UUID - * @return entry index, or {@code -1} if not found - */ - private final int findEntry(long msb, long lsb) { - int lowIndex = 0; - int highIndex = msbs.length - 1; - - // Use floats to prevent integer overflow during interpolation. - // Lost accuracy is no problem, since we use interpolation only - // as a guess of where the target value is located and the actual - // comparisons are still done using the original values. - float lowValue = Long.MIN_VALUE; - float highValue = Long.MAX_VALUE; - float targetValue = msb; - - while (lowIndex <= highIndex) { - int guessIndex = lowIndex; - float valueRange = highValue - lowValue; - if (valueRange >= 1) { // no point in interpolating further - // Math.round() also prevents IndexOutOfBoundsExceptions - // caused by possible inaccuracy in the float computations. - guessIndex += Math.round( - (highIndex - lowIndex) * (targetValue - lowValue) - / valueRange); - } - - long m = msbs[guessIndex]; - if (msb < m) { - highIndex = guessIndex - 1; - highValue = m; - } else if (msb > m) { - lowIndex = guessIndex + 1; - lowValue = m; - } else { - // getting close... - long l = lsbs[guessIndex]; - if (lsb < l) { - highIndex = guessIndex - 1; - highValue = m; - } else if (lsb > l) { - highIndex = guessIndex + 1; - highValue = m; - } else { - // found it! 
- return guessIndex; - } - } - } - - // not found - return -1; - } - - @Override - @Deprecated - public long getEstimatedWeight() { - // estimation of the object including empty 'recent' map - long total = 168; - - // msbs - total += 24 + msbs.length * 8; - // lsbs - total += 24 + lsbs.length * 8; - // beforeOffsets - total += 24 + beforeOffsets.length * 2; - - // entryIndex - total += 24 + entryIndex.length * 4; - // afterOffsets - total += 24 + afterOffsets.length * 2; - - // afterSegmentIds - total += 24 + afterSegmentIds.length * 4; - // afterMsbs - total += 24 + afterMsbs.length * 8; - // afterLsbs - total += 24 + afterLsbs.length * 8; - - return total; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/ListRecord.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/ListRecord.java deleted file mode 100644 index 3dffc94..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/ListRecord.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkElementIndex; -import static com.google.common.base.Preconditions.checkPositionIndexes; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; - -import java.util.List; - -/** - * A record of type "LIST". 
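The findEntry() shown above achieves the advertised O(log log n) lookup with interpolation search over the sorted msbs array; the following self-contained sketch restates the technique for a plain sorted long[] (it mirrors the deleted code but drops the lsb tie-breaking):

    // Interpolation search over a sorted array of roughly uniformly
    // distributed longs. Floats keep the interpolation step free of integer
    // overflow; the guess is only a hint, comparisons use the exact values.
    static int interpolationSearch(long[] sorted, long target) {
        int lo = 0, hi = sorted.length - 1;
        float loVal = Long.MIN_VALUE, hiVal = Long.MAX_VALUE;
        while (lo <= hi) {
            int guess = lo;
            float range = hiVal - loVal;
            if (range >= 1) { // no point in interpolating further
                guess += Math.round((hi - lo) * (target - loVal) / range);
            }
            long v = sorted[guess];
            if (target < v) {
                hi = guess - 1;
                hiVal = v;
            } else if (target > v) {
                lo = guess + 1;
                loVal = v;
            } else {
                return guess; // found it
            }
        }
        return -1; // not found
    }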
- */ -class ListRecord extends Record { - - static final int LEVEL_SIZE = Segment.SEGMENT_REFERENCE_LIMIT; - - private final int size; - - private final int bucketSize; - - ListRecord(RecordId id, int size) { - super(id); - checkArgument(size >= 0); - this.size = size; - - int bs = 1; - while (bs * LEVEL_SIZE < size) { - bs *= LEVEL_SIZE; - } - this.bucketSize = bs; - } - - public int size() { - return size; - } - - public RecordId getEntry(int index) { - checkElementIndex(index, size); - if (size == 1) { - return getRecordId(); - } else { - int bucketIndex = index / bucketSize; - int bucketOffset = index % bucketSize; - Segment segment = getSegment(); - RecordId id = segment.readRecordId(getOffset(0, bucketIndex)); - ListRecord bucket = new ListRecord( - id, Math.min(bucketSize, size - bucketIndex * bucketSize)); - return bucket.getEntry(bucketOffset); - } - } - - public List getEntries() { - return getEntries(0, size); - } - - public List getEntries(int index, int count) { - if (index + count > size) { - count = size - index; - } - if (count == 0) { - return emptyList(); - } else if (count == 1) { - return singletonList(getEntry(index)); - } else { - List ids = newArrayListWithCapacity(count); - getEntries(index, count, ids); - return ids; - } - } - - private void getEntries(int index, int count, List ids) { - checkPositionIndexes(index, index + count, size); - Segment segment = getSegment(); - if (size == 1) { - ids.add(getRecordId()); - } else if (bucketSize == 1) { - for (int i = 0; i < count; i++) { - ids.add(segment.readRecordId(getOffset(0, index + i))); - } - } else { - while (count > 0) { - int bucketIndex = index / bucketSize; - int bucketOffset = index % bucketSize; - RecordId id = segment.readRecordId(getOffset(0, bucketIndex)); - ListRecord bucket = new ListRecord( - id, Math.min(bucketSize, size - bucketIndex * bucketSize)); - int n = Math.min(bucket.size() - bucketOffset, count); - bucket.getEntries(bucketOffset, n, ids); - index += n; - count -= n; - } - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java deleted file mode 100644 index 842fa58..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
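Entry lookup in the ListRecord just removed is plain index arithmetic over a tree of fixed-size buckets; a minimal sketch of the constructor's bucket-size computation and one descent step of getEntry(), with levelSize standing in for the LEVEL_SIZE constant:

    // One descent step through a bucketed list: which child bucket holds the
    // entry, and at which index inside that bucket the recursion continues.
    static int[] descend(int index, int size, int levelSize) {
        int bucketSize = 1;                      // top-level bucket size, as in
        while (bucketSize * levelSize < size) {  // the deleted constructor
            bucketSize *= levelSize;
        }
        return new int[] { index / bucketSize, index % bucketSize };
    }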
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static org.apache.jackrabbit.oak.plugins.segment.MapRecord.HASH_MASK; - -import java.util.Map; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.spi.state.AbstractChildNodeEntry; - -import com.google.common.collect.ComparisonChain; - -/** - * Representation of a single key-value entry in a map. - */ -class MapEntry extends AbstractChildNodeEntry - implements Map.Entry, Comparable { - - private final String name; - - private final RecordId key; - - private final RecordId value; - - MapEntry(String name, RecordId key, RecordId value) { - this.name = checkNotNull(name); - this.key = checkNotNull(key); - this.value = value; - } - - public int getHash() { - return MapRecord.getHash(name); - } - - //----------------------------------------------------< ChildNodeEntry >-- - - @Override @Nonnull - public String getName() { - return name; - } - - @Override @Nonnull - public SegmentNodeState getNodeState() { - checkState(value != null); - return new SegmentNodeState(value); - } - - //---------------------------------------------------------< Map.Entry >-- - - @Override - public RecordId getKey() { - return key; - } - - @Override - public RecordId getValue() { - return value; - } - - @Override - public RecordId setValue(RecordId value) { - throw new UnsupportedOperationException(); - } - - //--------------------------------------------------------< Comparable >-- - - @Override - public int compareTo(MapEntry that) { - return ComparisonChain.start() - .compare(getHash() & HASH_MASK, that.getHash() & HASH_MASK) - .compare(name, that.name) - .compare(value, that.value) - .result(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java deleted file mode 100644 index 1cda3f5..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java +++ /dev/null @@ -1,615 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.collect.Iterables.concat; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static java.lang.Integer.bitCount; -import static java.lang.Integer.highestOneBit; -import static java.lang.Integer.numberOfTrailingZeros; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; - -import com.google.common.base.Objects; -import com.google.common.collect.ComparisonChain; - -/** - * A map. The top level record is either a record of type "BRANCH" or "LEAF" - * (depending on the data). - */ -class MapRecord extends Record { - - /** - * Magic constant from a random number generator, used to generate - * good hash values. - */ - private static final int M = 0xDEECE66D; - private static final int A = 0xB; - static final long HASH_MASK = 0xFFFFFFFFL; - - /** - * Generates a hash code for the value, using a random number generator - * to improve the distribution of the hash values. - */ - static int getHash(String name) { - return (name.hashCode() ^ M) * M + A; - } - - /** - * Number of bits of the hash code to look at on each level of the trie. - */ - protected static final int BITS_PER_LEVEL = 5; - - /** - * Number of buckets at each level of the trie. - */ - protected static final int BUCKETS_PER_LEVEL = 1 << BITS_PER_LEVEL; // 32 - - /** - * Maximum number of trie levels. - */ - protected static final int MAX_NUMBER_OF_LEVELS = - (32 + BITS_PER_LEVEL - 1) / BITS_PER_LEVEL; // 7 - - /** - * Number of bits needed to indicate the current trie level. - * Currently 4. - */ - protected static final int LEVEL_BITS = // 4, using nextPowerOfTwo(): - numberOfTrailingZeros(highestOneBit(MAX_NUMBER_OF_LEVELS) << 1); - - /** - * Number of bits used to indicate the size of a map. - * Currently 28. - */ - protected static final int SIZE_BITS = 32 - LEVEL_BITS; - - /** - * Maximum size of a map. 
- */ - protected static final int MAX_SIZE = (1 << SIZE_BITS) - 1; // ~268e6 - - protected MapRecord(RecordId id) { - super(id); - } - - boolean isLeaf() { - Segment segment = getSegment(); - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).isLeaf(); - } - return !isBranch(head); - } - - public boolean isDiff() { - return isDiff(getSegment().readInt(getOffset(0))); - } - - MapRecord[] getBuckets() { - Segment segment = getSegment(); - MapRecord[] buckets = new MapRecord[BUCKETS_PER_LEVEL]; - int bitmap = segment.readInt(getOffset(4)); - int ids = 0; - for (int i = 0; i < BUCKETS_PER_LEVEL; i++) { - if ((bitmap & (1 << i)) != 0) { - buckets[i] = new MapRecord( - segment.readRecordId(getOffset(8, ids++))); - } else { - buckets[i] = null; - } - } - return buckets; - } - - private List getBucketList(Segment segment) { - List buckets = newArrayListWithCapacity(BUCKETS_PER_LEVEL); - int bitmap = segment.readInt(getOffset(4)); - int ids = 0; - for (int i = 0; i < BUCKETS_PER_LEVEL; i++) { - if ((bitmap & (1 << i)) != 0) { - RecordId id = segment.readRecordId(getOffset(8, ids++)); - buckets.add(new MapRecord(id)); - } - } - return buckets; - } - - int size() { - Segment segment = getSegment(); - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).size(); - } - return getSize(head); - } - - MapEntry getEntry(String name) { - checkNotNull(name); - int hash = getHash(name); - Segment segment = getSegment(); - - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - if (hash == segment.readInt(getOffset(4))) { - RecordId key = segment.readRecordId(getOffset(8)); - if (name.equals(Segment.readString(key))) { - RecordId value = segment.readRecordId(getOffset(8, 1)); - return new MapEntry(name, key, value); - } - } - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).getEntry(name); - } - - int size = getSize(head); - if (size == 0) { - return null; // shortcut - } - - int level = getLevel(head); - if (isBranch(size, level)) { - // this is an intermediate branch record - // check if a matching bucket exists, and recurse - int bitmap = segment.readInt(getOffset(4)); - int mask = (1 << BITS_PER_LEVEL) - 1; - int shift = 32 - (level + 1) * BITS_PER_LEVEL; - int index = (hash >> shift) & mask; - int bit = 1 << index; - if ((bitmap & bit) != 0) { - int ids = bitCount(bitmap & (bit - 1)); - RecordId id = segment.readRecordId(getOffset(8, ids)); - return new MapRecord(id).getEntry(name); - } else { - return null; - } - } - - // use interpolation search to find the matching entry in this map leaf - int shift = 32 - level * BITS_PER_LEVEL; - long mask = -1L << shift; - long h = hash & HASH_MASK; - int p = 0; - long pH = h & mask; // lower bound on hash values in this map leaf - int q = size - 1; - long qH = pH | ~mask; // upper bound on hash values in this map leaf - while (p <= q) { - assert pH <= qH; - - // interpolate the most likely index of the target entry - // based on its hash code and the lower and upper bounds - int i = p + (int) ((q - p) * (h - pH) / (qH - pH)); - assert p <= i && i <= q; - - long iH = segment.readInt(getOffset(4 + i * 4)) & HASH_MASK; - int diff = Long.valueOf(iH).compareTo(Long.valueOf(h)); - if (diff == 0) { - RecordId keyId = segment.readRecordId( - getOffset(4 + size * 4, i * 2)); - RecordId valueId = segment.readRecordId( - getOffset(4 + 
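A key is located in this trie by scrambling its hash and consuming BITS_PER_LEVEL bits per level, highest bits first; a minimal sketch restating the deleted getHash() above and the per-level bucket selection used in the lookups that follow:

    // Routes a map key to one of the 32 buckets at the given trie level.
    static int getHash(String name) {
        final int M = 0xDEECE66D, A = 0xB;   // constants from the deleted code
        return (name.hashCode() ^ M) * M + A;
    }

    static int bucketIndex(String name, int level) {
        final int bitsPerLevel = 5;                   // BITS_PER_LEVEL
        int shift = 32 - (level + 1) * bitsPerLevel;  // high bits first
        return (getHash(name) >> shift) & ((1 << bitsPerLevel) - 1);
    }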
size * 4, i * 2 + 1)); - diff = Segment.readString(keyId).compareTo(name); - if (diff == 0) { - return new MapEntry(name, keyId, valueId); - } - } - - if (diff < 0) { - p = i + 1; - pH = iH; - } else { - q = i - 1; - qH = iH; - } - } - return null; - } - - private RecordId getValue(int hash, RecordId key) { - checkNotNull(key); - Segment segment = getSegment(); - - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - if (hash == segment.readInt(getOffset(4)) - && key.equals(segment.readRecordId(getOffset(8)))) { - return segment.readRecordId(getOffset(8, 1)); - } - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).getValue(hash, key); - } - - int size = getSize(head); - if (size == 0) { - return null; // shortcut - } - - int level = getLevel(head); - if (isBranch(size, level)) { - // this is an intermediate branch record - // check if a matching bucket exists, and recurse - int bitmap = segment.readInt(getOffset(4)); - int mask = (1 << BITS_PER_LEVEL) - 1; - int shift = 32 - (level + 1) * BITS_PER_LEVEL; - int index = (hash >> shift) & mask; - int bit = 1 << index; - if ((bitmap & bit) != 0) { - int ids = bitCount(bitmap & (bit - 1)); - RecordId id = segment.readRecordId(getOffset(8, ids)); - return new MapRecord(id).getValue(hash, key); - } else { - return null; - } - } - - // this is a leaf record; scan the list to find a matching entry - Long h = hash & HASH_MASK; - for (int i = 0; i < size; i++) { - int hashOffset = getOffset(4 + i * 4); - int diff = h.compareTo(segment.readInt(hashOffset) & HASH_MASK); - if (diff > 0) { - return null; - } else if (diff == 0) { - int keyOffset = getOffset(4 + size * 4, i * 2); - if (key.equals(segment.readRecordId(keyOffset))) { - int valueOffset = getOffset(4 + size * 4, i * 2 + 1); - return segment.readRecordId(valueOffset); - } - } - } - return null; - } - - Iterable getKeys() { - Segment segment = getSegment(); - - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).getKeys(); - } - - int size = getSize(head); - if (size == 0) { - return Collections.emptyList(); // shortcut - } - - int level = getLevel(head); - if (isBranch(size, level)) { - List buckets = getBucketList(segment); - List> keys = - newArrayListWithCapacity(buckets.size()); - for (MapRecord bucket : buckets) { - keys.add(bucket.getKeys()); - } - return concat(keys); - } - - RecordId[] ids = new RecordId[size]; - for (int i = 0; i < size; i++) { - ids[i] = segment.readRecordId(getOffset(4 + size * 4, i * 2)); - } - - String[] keys = new String[size]; - for (int i = 0; i < size; i++) { - keys[i] = Segment.readString(ids[i]); - } - return Arrays.asList(keys); - } - - Iterable getEntries() { - return getEntries(null, null); - } - - private Iterable getEntries( - final RecordId diffKey, final RecordId diffValue) { - Segment segment = getSegment(); - - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - RecordId key = segment.readRecordId(getOffset(8)); - RecordId value = segment.readRecordId(getOffset(8, 1)); - RecordId base = segment.readRecordId(getOffset(8, 2)); - return new MapRecord(base).getEntries(key, value); - } - - int size = getSize(head); - if (size == 0) { - return Collections.emptyList(); // shortcut - } - - int level = getLevel(head); - if (isBranch(size, level)) { - List buckets = getBucketList(segment); - List> entries = - newArrayListWithCapacity(buckets.size()); - for (final MapRecord bucket : buckets) { - 
entries.add(new Iterable() { - @Override - public Iterator iterator() { - return bucket.getEntries(diffKey, diffValue).iterator(); - } - }); - } - return concat(entries); - } - - MapEntry[] entries = new MapEntry[size]; - for (int i = 0; i < size; i++) { - RecordId key = segment.readRecordId(getOffset(4 + size * 4, i * 2)); - RecordId value; - if (key.equals(diffKey)) { - value = diffValue; - } else { - value = segment.readRecordId(getOffset(4 + size * 4, i * 2 + 1)); - } - String name = Segment.readString(key); - entries[i] = new MapEntry(name, key, value); - } - return Arrays.asList(entries); - } - - boolean compare(MapRecord before, final NodeStateDiff diff) { - if (fastEquals(this, before)) { - return true; - } - - Segment segment = getSegment(); - int head = segment.readInt(getOffset(0)); - if (isDiff(head)) { - int hash = segment.readInt(getOffset(4)); - RecordId keyId = segment.readRecordId(getOffset(8)); - final String key = Segment.readString(keyId); - final RecordId value = segment.readRecordId(getOffset(8, 1)); - MapRecord base = new MapRecord(segment.readRecordId(getOffset(8, 2))); - - boolean rv = base.compare(before, new DefaultNodeStateDiff() { - @Override - public boolean childNodeAdded(String name, NodeState after) { - return name.equals(key) - || diff.childNodeAdded(name, after); - } - @Override - public boolean childNodeChanged( - String name, NodeState before, NodeState after) { - return name.equals(key) - || diff.childNodeChanged(name, before, after); - } - @Override - public boolean childNodeDeleted(String name, NodeState before) { - return diff.childNodeDeleted(name, before); - } - }); - if (rv) { - MapEntry beforeEntry = before.getEntry(key); - if (beforeEntry == null) { - rv = diff.childNodeAdded( - key, - new SegmentNodeState(value)); - } else if (!value.equals(beforeEntry.getValue())) { - rv = diff.childNodeChanged( - key, - beforeEntry.getNodeState(), - new SegmentNodeState(value)); - } - } - return rv; - } - - Segment beforeSegment = before.getSegment(); - int beforeHead = beforeSegment.readInt(before.getOffset(0)); - if (isDiff(beforeHead)) { - int hash = beforeSegment.readInt(before.getOffset(4)); - RecordId keyId = beforeSegment.readRecordId(before.getOffset(8)); - final String key = Segment.readString(keyId); - final RecordId value = beforeSegment.readRecordId(before.getOffset(8, 1)); - MapRecord base = new MapRecord(beforeSegment.readRecordId(before.getOffset(8, 2))); - - boolean rv = this.compare(base, new DefaultNodeStateDiff() { - @Override - public boolean childNodeAdded(String name, NodeState after) { - return diff.childNodeAdded(name, after); - } - @Override - public boolean childNodeChanged( - String name, NodeState before, NodeState after) { - return name.equals(key) - || diff.childNodeChanged(name, before, after); - } - @Override - public boolean childNodeDeleted(String name, NodeState before) { - return name.equals(key) - || diff.childNodeDeleted(name, before); - } - }); - if (rv) { - MapEntry afterEntry = this.getEntry(key); - if (afterEntry == null) { - rv = diff.childNodeDeleted( - key, - new SegmentNodeState(value)); - } else if (!value.equals(afterEntry.getValue())) { - rv = diff.childNodeChanged( - key, - new SegmentNodeState(value), - afterEntry.getNodeState()); - } - } - return rv; - } - - if (isBranch(beforeHead) && isBranch(head)) { - return compareBranch(before, this, diff); - } - - Iterator beforeEntries = before.getEntries().iterator(); - Iterator afterEntries = this.getEntries().iterator(); - - MapEntry beforeEntry = 
nextOrNull(beforeEntries); - MapEntry afterEntry = nextOrNull(afterEntries); - while (beforeEntry != null || afterEntry != null) { - int d = compare(beforeEntry, afterEntry); - if (d < 0) { - if (!diff.childNodeDeleted( - beforeEntry.getName(), beforeEntry.getNodeState())) { - return false; - } - beforeEntry = nextOrNull(beforeEntries); - } else if (d == 0) { - if (!beforeEntry.getValue().equals(afterEntry.getValue()) - && !diff.childNodeChanged( - beforeEntry.getName(), - beforeEntry.getNodeState(), - afterEntry.getNodeState())) { - return false; - } - beforeEntry = nextOrNull(beforeEntries); - afterEntry = nextOrNull(afterEntries); - } else { - if (!diff.childNodeAdded( - afterEntry.getName(), afterEntry.getNodeState())) { - return false; - } - afterEntry = nextOrNull(afterEntries); - } - } - - return true; - } - - //------------------------------------------------------------< Object >-- - - @Override - public String toString() { - StringBuilder builder = null; - for (MapEntry entry : getEntries()) { - if (builder == null) { - builder = new StringBuilder("{ "); - } else { - builder.append(", "); - } - builder.append(entry); - } - if (builder == null) { - return "{}"; - } else { - builder.append(" }"); - return builder.toString(); - } - } - - //-----------------------------------------------------------< private >-- - - /** - * Compares two map branches. Given the way the comparison algorithm - * works, the branches are always guaranteed to be at the same level - * with the same hash prefixes. - */ - private static boolean compareBranch( - MapRecord before, MapRecord after, NodeStateDiff diff) { - MapRecord[] beforeBuckets = before.getBuckets(); - MapRecord[] afterBuckets = after.getBuckets(); - for (int i = 0; i < BUCKETS_PER_LEVEL; i++) { - if (Objects.equal(beforeBuckets[i], afterBuckets[i])) { - // these buckets are equal (or both empty), so no changes - } else if (beforeBuckets[i] == null) { - // before bucket is empty, so all after entries were added - MapRecord bucket = afterBuckets[i]; - for (MapEntry entry : bucket.getEntries()) { - if (!diff.childNodeAdded( - entry.getName(), entry.getNodeState())) { - return false; - } - } - } else if (afterBuckets[i] == null) { - // after bucket is empty, so all before entries were deleted - MapRecord bucket = beforeBuckets[i]; - for (MapEntry entry : bucket.getEntries()) { - if (!diff.childNodeDeleted( - entry.getName(), entry.getNodeState())) { - return false; - } - } - } else { - // both before and after buckets exist; compare recursively - MapRecord beforeBucket = beforeBuckets[i]; - MapRecord afterBucket = afterBuckets[i]; - if (!afterBucket.compare(beforeBucket, diff)) { - return false; - } - } - } - return true; - } - - private static int getSize(int head) { - return head & ((1 << MapRecord.SIZE_BITS) - 1); - } - - private static int getLevel(int head) { - return head >>> MapRecord.SIZE_BITS; - } - - private static boolean isDiff(int head) { - return head == -1; - } - - private static boolean isBranch(int head) { - return isBranch(getSize(head), getLevel(head)); - } - - private static boolean isBranch(int size, int level) { - return size > MapRecord.BUCKETS_PER_LEVEL - && level < MapRecord.MAX_NUMBER_OF_LEVELS; - } - - private static int compare(MapEntry before, MapEntry after) { - if (before == null) { - // A null value signifies the end of the list of entries, - // which is why the return value here is a bit counter-intuitive - // (null > non-null). 
The idea is to make a virtual end-of-list - // sentinel value appear greater than any normal value. - return 1; - } else if (after == null) { - return -1; // see above - } else { - return ComparisonChain.start() - .compare(before.getHash() & HASH_MASK, after.getHash() & HASH_MASK) - .compare(before.getName(), after.getName()) - .result(); - } - } - - private static MapEntry nextOrNull(Iterator iterator) { - if (iterator.hasNext()) { - return iterator.next(); - } else { - return null; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMap.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMap.java deleted file mode 100644 index 3eb08e6..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMap.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import java.util.Set; -import java.util.UUID; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -/** - * A {@code PartialCompactionMap} maps uncompacted to compacted record ids - * from a single compaction cycle. - * - * @see CompactionMap - */ -@Deprecated -public interface PartialCompactionMap { - - /** - * Checks whether the record with the given {@code before} identifier was - * compacted to a new record with the given {@code after} identifier. - * - * @param before before record identifier - * @param after after record identifier - * @return whether {@code before} was compacted to {@code after} - */ - @Deprecated - boolean wasCompactedTo(@Nonnull RecordId before, @Nonnull RecordId after); - - /** - * Checks whether content in the segment with the given identifier was - * compacted to new segments. - * - * @param id segment identifier - * @return whether the identified segment was compacted - */ - @Deprecated - boolean wasCompacted(@Nonnull UUID id); - - /** - * Retrieve the record id {@code before} maps to or {@code null} - * if no such id exists. - * @param before before record id - * @return after record id or {@code null} - */ - @CheckForNull - @Deprecated - RecordId get(@Nonnull RecordId before); - - /** - * Adds a new entry to the compaction map. Overwriting a previously - * added entry is not supported. - * @param before before record id - * @param after after record id - * @throws IllegalArgumentException if {@code before} already exists in the map - */ - @Deprecated - void put(@Nonnull RecordId before, @Nonnull RecordId after); - - /** - * Remove all keys from this map where {@code keys.contains(key.asUUID())}. 
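The compare(MapEntry, MapEntry) helper above drives a classic sorted-merge diff: both entry lists are ordered by (hash, name), and the null from an exhausted iterator acts as a sentinel that sorts after every real entry, so the remaining side drains as pure additions or deletions. A minimal, self-contained sketch of that pattern (plain strings stand in for MapEntry; all names here are illustrative):

    import java.util.Arrays;
    import java.util.Iterator;

    public class SortedMergeDiff {

        // Null sorts last: an exhausted side no longer "wins" the merge.
        static int compare(String before, String after) {
            if (before == null) {
                return 1;  // virtual end-of-list sentinel > any real value
            } else if (after == null) {
                return -1; // see above
            } else {
                return before.compareTo(after);
            }
        }

        static <T> T nextOrNull(Iterator<T> it) {
            return it.hasNext() ? it.next() : null;
        }

        public static void main(String[] args) {
            Iterator<String> before = Arrays.asList("a", "b", "d").iterator();
            Iterator<String> after = Arrays.asList("b", "c", "d").iterator();
            String b = nextOrNull(before);
            String a = nextOrNull(after);
            while (b != null || a != null) {
                int d = compare(b, a);
                if (d < 0) {
                    System.out.println("deleted " + b);
                    b = nextOrNull(before);
                } else if (d == 0) {
                    System.out.println("unchanged " + b); // the real code compares values here
                    b = nextOrNull(before);
                    a = nextOrNull(after);
                } else {
                    System.out.println("added " + a);
                    a = nextOrNull(after);
                }
            }
            // prints: deleted a, unchanged b, added c, unchanged d
        }
    }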
- * @param uuids uuids of the keys to remove - */ - @Deprecated - void remove(@Nonnull Set uuids); - - /** - * Compressing this map ensures it takes up as little heap as possible. This - * operation might be expensive and should only be called in suitable intervals. - */ - @Deprecated - void compress(); - - /** - * Number of segments referenced by the keys in this map. The returned value might only - * be based on the compressed part of the map. - * @return number of segments - */ - @Deprecated - long getSegmentCount(); - - /** - * Number of records referenced by the keys in this map. The returned value might only - * be based on the compressed part of the map. - * @return number of records - */ - @Deprecated - long getRecordCount(); - - /** - * Determine whether this map contains keys at all. - * @return {@code true} iff this map is empty - */ - @Deprecated - boolean isEmpty(); - - /** - * The weight of the compaction map is its heap memory consumption in bytes. - * @return Estimated weight of the compaction map - */ - @Deprecated - long getEstimatedWeight(); -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PersistedCompactionMap.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PersistedCompactionMap.java deleted file mode 100644 index abe1d26..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PersistedCompactionMap.java +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newTreeMap; -import static java.lang.Integer.getInteger; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.encode; - -import java.io.IOException; -import java.util.Collections; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.TreeMap; -import java.util.UUID; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A {@code PartialCompactionMap} implementation persisting its entries - * to segments. - * - * TODO In theory we could also compact the compaction map. Is there any need to do so? - */ -@Deprecated -public class PersistedCompactionMap implements PartialCompactionMap { - private static final Logger LOG = LoggerFactory.getLogger(PersistedCompactionMap.class); - - /** - * Rough estimate of the number of bytes of disk space of a map entry. - * Used by the compaction gain estimator to offset its estimate. 
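For orientation, the contract above amounts to a write-once map from pre-compaction record ids to their compacted counterparts, keyed so that whole segments can be evicted at once. A toy in-memory illustration of that contract (a plain HashMap with string keys; the real PersistedCompactionMap below stores its entries in segments):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import java.util.UUID;

    // Toy stand-in: key = "segmentUuid:offset", value = the compacted record id.
    class InMemoryCompactionMap {
        private final Map<String, String> entries = new HashMap<>();

        void put(String before, String after) {
            if (entries.putIfAbsent(before, after) != null) {
                throw new IllegalArgumentException("already mapped: " + before);
            }
        }

        String get(String before) {
            return entries.get(before);
        }

        boolean wasCompactedTo(String before, String after) {
            return after.equals(get(before));
        }

        // remove(Set<UUID>): evict every entry whose key belongs to a removed segment
        void remove(Set<UUID> uuids) {
            entries.keySet().removeIf(
                    k -> uuids.contains(UUID.fromString(k.substring(0, k.indexOf(':')))));
        }
    }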
- */ - @Deprecated - public static final int BYTES_PER_ENTRY = getInteger("bytes-per-entry", 50); - - /** - * Number of map entries to keep until compressing this map. - */ - private static final int COMPRESS_INTERVAL = getInteger("compress-interval", 10000000); - - /** - * Key used to store meta data associated with the individual map generations. Tools - * can use this to grep across segments for finding the meta data and ultimately - * to find and parse the compaction map generations. - */ - @Deprecated - public static final String PERSISTED_COMPACTION_MAP = "PersistedCompactionMap"; - - private final TreeMap recent = newTreeMap(); - - private final SegmentTracker tracker; - - private long recordCount; - private MapRecord entries; - - PersistedCompactionMap(@Nonnull SegmentTracker tracker) { - this.tracker = tracker; - } - - @Override - @Deprecated - public boolean wasCompactedTo(@Nonnull RecordId before, @Nonnull RecordId after) { - return (after.equals(get(before))); - } - - @Override - @Deprecated - public boolean wasCompacted(@Nonnull UUID uuid) { - return recent.containsKey(uuid) || - entries != null && entries.getEntry(uuid.toString()) != null; - } - - private static UUID asUUID(@Nonnull SegmentId id) { - return new UUID(id.getMostSignificantBits(), id.getLeastSignificantBits()); - } - - @Override - @CheckForNull - @Deprecated - public RecordId get(@Nonnull RecordId before) { - UUID uuid = asUUID(before.getSegmentId()); - short offset = encode(before.getOffset()); - - RecordId recordId = get(recent, uuid, offset); - if (recordId != null) { - return recordId; - } - - return get(tracker, entries, uuid, offset); - } - - @CheckForNull - private static RecordId get(@Nonnull Map map, @Nonnull UUID uuid, short offset) { - RecordIdMap newSegment = map.get(uuid); - if (newSegment != null) { - return newSegment.get(offset); - } - return null; - } - - @CheckForNull - private static RecordId get(@Nonnull SegmentTracker tracker, @Nullable MapRecord map, @Nonnull UUID uuid, short offset) { - if (map == null) { - return null; - } - - MapEntry newSegmentId = map.getEntry(uuid.toString()); - if (newSegmentId == null) { - return null; - } - MapRecord newSegment = new MapRecord(newSegmentId.getValue()); - MapEntry newRecordId = newSegment.getEntry(String.valueOf(offset)); - if (newRecordId == null) { - return null; - } - return RecordId.fromString(tracker, Segment.readString(newRecordId.getValue())); - } - - @Override - @Deprecated - public void put(@Nonnull RecordId before, @Nonnull RecordId after) { - if (get(before) != null) { - throw new IllegalArgumentException(); - } - - UUID uuid = asUUID(before.getSegmentId()); - RecordIdMap entry = recent.get(uuid); - if (entry == null) { - entry = new RecordIdMap(); - recent.put(uuid, entry); - } - entry.put(encode(before.getOffset()), after); - - if (recent.size() > COMPRESS_INTERVAL) { - compress(); - } - } - - @Override - @Deprecated - public void remove(@Nonnull Set uuids) { - compress(uuids); - } - - @Override - @Deprecated - public void compress() { - compress(Collections.emptySet()); - } - - @Override - @Deprecated - public long getSegmentCount() { - return entries == null ? 
0 : entries.size(); - } - - @Override - @Deprecated - public long getRecordCount() { - return recordCount; - } - - @Override - @Deprecated - public boolean isEmpty() { - return recent.size() + recordCount == 0; - } - - private void compress(@Nonnull Set removed) { - try { - if (recent.isEmpty() && removed.isEmpty()) { - return; - } - - SegmentWriter writer = null; - Map segmentIdMap = newHashMap(); - for (Entry recentEntry : recent.entrySet()) { - UUID uuid = recentEntry.getKey(); - RecordIdMap newSegment = recentEntry.getValue(); - - if (removed.contains(uuid)) { - continue; - } - - MapRecord base; - MapEntry baseEntry = entries == null ? null : entries.getEntry(uuid.toString()); - base = baseEntry == null ? null : new MapRecord(baseEntry.getValue()); - - if (writer == null) { - writer = tracker.createSegmentWriter(createWid()); - } - - Map offsetMap = newHashMap(); - for (int k = 0; k < newSegment.size(); k++) { - offsetMap.put(String.valueOf(newSegment.getKey(k)), - writer.writeString(newSegment.getRecordId(k).toString10())); - } - RecordId newEntryId = writer.writeMap(base, offsetMap).getRecordId(); - segmentIdMap.put(uuid.toString(), newEntryId); - recordCount += offsetMap.size(); - } - - if (entries != null) { - for (UUID uuid : removed) { - MapEntry toRemove = entries.getEntry(uuid.toString()); - if (toRemove != null) { - segmentIdMap.put(uuid.toString(), null); - recordCount -= new MapRecord(toRemove.getValue()).size(); - } - } - } - - if (!segmentIdMap.isEmpty()) { - if (writer == null) { - writer = tracker.createSegmentWriter(createWid()); - } - - RecordId previousBaseId = entries == null ? null : entries.getRecordId(); - entries = writer.writeMap(entries, segmentIdMap); - entries.getSegment().getSegmentId().pin(); - String mapInfo = PERSISTED_COMPACTION_MAP + '{' + - "id=" + entries.getRecordId() + - ", baseId=" + previousBaseId + '}'; - writer.writeString(mapInfo); - writer.flush(); - } - - recent.clear(); - if (recordCount == 0) { - entries = null; - } - } catch (IOException e) { - LOG.error("Error compression compaction map", e); - throw new IllegalStateException("Unexpected IOException", e); - } - } - - @Nonnull - private String createWid() { - return "cm-" + (tracker.getCompactionMap().getGeneration() + 1); - } - - /** - * @return 0 - */ - @Override - @Deprecated - public long getEstimatedWeight() { - return 0; - } - - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PropertyTemplate.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PropertyTemplate.java deleted file mode 100644 index a43d1e3..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/PropertyTemplate.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
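PersistedCompactionMap is thus a two-tier structure: puts land in the in-memory recent tier, and compress() periodically folds that tier into segment-persisted map records, so get() probes the cheap tier first. A schematic of the two-tier lookup (ordinary HashMaps stand in for RecordIdMap and the segment-backed MapRecord; the threshold value is illustrative):

    import java.util.HashMap;
    import java.util.Map;

    // Two-tier map: writes land in 'recent'; compress() drains it into 'persisted'.
    class TwoTierMap {
        private static final int COMPRESS_INTERVAL = 4; // illustrative threshold

        private final Map<String, String> recent = new HashMap<>();
        private final Map<String, String> persisted = new HashMap<>();

        void put(String key, String value) {
            recent.put(key, value);
            if (recent.size() > COMPRESS_INTERVAL) {
                compress();
            }
        }

        String get(String key) {
            String value = recent.get(key);   // cheap in-memory tier first
            return value != null ? value : persisted.get(key);
        }

        void compress() {
            persisted.putAll(recent);         // the real code writes map records here
            recent.clear();
        }
    }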
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import javax.annotation.Nonnull; - -import static com.google.common.base.Preconditions.checkNotNull; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; - -import com.google.common.collect.ComparisonChain; - -/** - * A property definition within a template (the property name, the type, and the - * index within the list of properties for the given node). - */ -class PropertyTemplate implements Comparable { - - /** - * The index of this property within the list of properties in the node - * template. - */ - private final int index; - - private final String name; - - private final Type type; - - PropertyTemplate(int index, String name, Type type) { - this.index = index; - this.name = checkNotNull(name); - this.type = checkNotNull(type); - } - - PropertyTemplate(PropertyState state) { - checkNotNull(state); - this.index = 0; - this.name = state.getName(); - this.type = state.getType(); - } - - public int getIndex() { - return index; - } - - public String getName() { - return name; - } - - public Type getType() { - return type; - } - - //--------------------------------------------------------< Comparable >-- - - @Override - public int compareTo(@Nonnull PropertyTemplate template) { - checkNotNull(template); - return ComparisonChain.start() - .compare(hashCode(), template.hashCode()) // important - .compare(name, template.name) - .compare(type, template.type) - .result(); - } - - //------------------------------------------------------------< Object >-- - - @Override - public boolean equals(Object object) { - if (this == object) { - return true; - } else if (object instanceof PropertyTemplate) { - PropertyTemplate that = (PropertyTemplate) object; - return name.equals(that.name) && type.equals(that.type); - } else { - return false; - } - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public String toString() { - return name + "(" + type + ")"; - } - -} \ No newline at end of file diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java deleted file mode 100644 index 736704e..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Record.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import javax.annotation.Nonnull; - -/** - * Record within a segment. 
- */ -class Record { - - static boolean fastEquals(Object a, Object b) { - return a instanceof Record && fastEquals((Record) a, b); - } - - static boolean fastEquals(Record a, Object b) { - return b instanceof Record && fastEquals(a, (Record) b); - } - - static boolean fastEquals(Record a, Record b) { - return a.offset == b.offset && a.segmentId.equals(b.segmentId); - } - - /** - * Identifier of the segment that contains this record. - */ - private final SegmentId segmentId; - - /** - * Segment offset of this record. - */ - private final int offset; - - /** - * Creates a new object for the identified record. - * - * @param id record identified - */ - protected Record(@Nonnull RecordId id) { - this(id.getSegmentId(), id.getOffset()); - } - - protected Record(@Nonnull SegmentId segmentId, int offset) { - this.segmentId = segmentId; - this.offset = offset; - } - - protected boolean wasCompactedTo(Record after) { - CompactionMap map = segmentId.getTracker().getCompactionMap(); - return map.wasCompactedTo(getRecordId(), after.getRecordId()); - } - - /** - * Returns the tracker of the segment that contains this record. - * - * @return segment tracker - */ - protected SegmentTracker getTracker() { - return segmentId.getTracker(); - } - - /** - * Returns the segment that contains this record. - * - * @return segment that contains this record - */ - protected Segment getSegment() { - return segmentId.getSegment(); - } - - /** - * Returns the identifier of this record. - * - * @return record identifier - */ - public RecordId getRecordId() { - return new RecordId(segmentId, offset); - } - - /** - * Returns the segment offset of this record. - * - * @return segment offset of this record - */ - protected final int getOffset() { - return offset; - } - - /** - * Returns the segment offset of the given byte position in this record. - * - * @param position byte position within this record - * @return segment offset of the given byte position - */ - protected final int getOffset(int position) { - return getOffset() + position; - } - - /** - * Returns the segment offset of a byte position in this record. - * The position is calculated from the given number of raw bytes and - * record identifiers. - * - * @param bytes number of raw bytes before the position - * @param ids number of record identifiers before the position - * @return segment offset of the specified byte position - */ - protected final int getOffset(int bytes, int ids) { - return getOffset(bytes + ids * Segment.RECORD_ID_BYTES); - } - - //------------------------------------------------------------< Object >-- - - @Override - public boolean equals(Object that) { - return fastEquals(this, that); - } - - @Override - public int hashCode() { - return segmentId.hashCode() ^ offset; - } - - @Override - public String toString() { - return getRecordId().toString(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java deleted file mode 100644 index e934b7b..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordId.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
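Record.getOffset(int, int) above is plain address arithmetic: a record-local position expressed as "n raw bytes plus m record ids" collapses to recordOffset + n + m * Segment.RECORD_ID_BYTES. A worked sketch (the constant's value of 3 is an assumption here; the authoritative value lives in Segment):

    // Sketch of the offset arithmetic in Record.
    class OffsetMath {
        static final int RECORD_ID_BYTES = 3; // assumed for illustration; see Segment

        static int offset(int recordStart, int bytes, int ids) {
            return recordStart + bytes + ids * RECORD_ID_BYTES;
        }

        public static void main(String[] args) {
            // MapRecord reads the value of entry i at "4 + size * 4" raw bytes
            // plus (i * 2 + 1) record ids past the record start:
            int size = 10;
            int i = 3;
            System.out.println(offset(0, 4 + size * 4, i * 2 + 1)); // 44 + 7 * 3 = 65
        }
    }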
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static java.lang.Integer.parseInt; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ALIGN_BITS; - -import java.util.UUID; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * The record id. This includes the segment id and the offset within the - * segment. - */ -@Deprecated -public final class RecordId implements Comparable { - - private static final Pattern PATTERN = Pattern.compile( - "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})" - + "(:(0|[1-9][0-9]*)|\\.([0-9a-f]{4}))"); - - @Deprecated - public static RecordId[] EMPTY_ARRAY = new RecordId[0]; - - @Deprecated - public static RecordId fromString(SegmentTracker factory, String id) { - Matcher matcher = PATTERN.matcher(id); - if (matcher.matches()) { - UUID uuid = UUID.fromString(matcher.group(1)); - SegmentId segmentId = factory.getSegmentId( - uuid.getMostSignificantBits(), - uuid.getLeastSignificantBits()); - - int offset; - if (matcher.group(3) != null) { - offset = parseInt(matcher.group(3)); - } else { - offset = parseInt(matcher.group(4), 16) << RECORD_ALIGN_BITS; - } - - return new RecordId(segmentId, offset); - } else { - throw new IllegalArgumentException("Bad record identifier: " + id); - } - } - - private final SegmentId segmentId; - - private final int offset; - - @Deprecated - public RecordId(SegmentId segmentId, int offset) { - checkArgument(offset < Segment.MAX_SEGMENT_SIZE); - checkArgument((offset % (1 << RECORD_ALIGN_BITS)) == 0); - this.segmentId = checkNotNull(segmentId); - this.offset = offset; - } - - @Deprecated - public SegmentId getSegmentId() { - return segmentId; - } - - @Deprecated - public int getOffset() { - return offset; - } - - /** - * @return the segment id part of this record id as UUID - */ - @Deprecated - public UUID asUUID() { - return segmentId.asUUID(); - } - - @Deprecated - public Segment getSegment() { - return segmentId.getSegment(); - } - - //--------------------------------------------------------< Comparable >-- - - @Override - @Deprecated - public int compareTo(RecordId that) { - checkNotNull(that); - int diff = segmentId.compareTo(that.segmentId); - if (diff == 0) { - diff = offset - that.offset; - } - return diff; - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public String toString() { - return String.format("%s.%04x", segmentId, offset >> RECORD_ALIGN_BITS); - } - - /** - * Returns the record id string representation used in Oak 1.0. 
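RecordId.fromString above accepts two textual forms: uuid:decimalOffset (the Oak 1.0 style produced by toString10 below) and uuid.hex4, where the four hex digits are the offset right-shifted by RECORD_ALIGN_BITS. A standalone sketch of both parses (regex copied from the class; the shift value of 2 is an assumption for illustration):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class RecordIdParseDemo {
        static final int RECORD_ALIGN_BITS = 2; // assumed; the real constant lives in Segment

        static final Pattern PATTERN = Pattern.compile(
                "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"
                + "(:(0|[1-9][0-9]*)|\\.([0-9a-f]{4}))");

        static int parseOffset(String id) {
            Matcher m = PATTERN.matcher(id);
            if (!m.matches()) {
                throw new IllegalArgumentException("Bad record identifier: " + id);
            }
            return m.group(3) != null
                    ? Integer.parseInt(m.group(3))                           // "uuid:256"
                    : Integer.parseInt(m.group(4), 16) << RECORD_ALIGN_BITS; // "uuid.0040"
        }

        public static void main(String[] args) {
            String uuid = "0e99a4a5-ffca-4d55-88f6-3a0b2a622b34";
            System.out.println(parseOffset(uuid + ":256"));  // 256
            System.out.println(parseOffset(uuid + ".0040")); // 0x40 << 2 = 256
        }
    }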
- */ - @Deprecated - public String toString10() { - return String.format("%s:%d", segmentId, offset); - } - - @Override - @Deprecated - public int hashCode() { - return segmentId.hashCode() ^ offset; - } - - @Override - @Deprecated - public boolean equals(Object object) { - if (this == object) { - return true; - } else if (object instanceof RecordId) { - RecordId that = (RecordId) object; - return offset == that.offset && segmentId.equals(that.segmentId); - } else { - return false; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java deleted file mode 100644 index 49eb3b3..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static java.lang.System.arraycopy; -import static java.util.Arrays.binarySearch; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -/** - * A memory optimised map of {@code short} key to {@link RecordId} values. - */ -@Deprecated -public class RecordIdMap { - private static final short[] NO_KEYS = new short[0]; - private static final RecordId[] NO_VALUES = new RecordId[0]; - - private short[] keys = NO_KEYS; - private RecordId[] values = NO_VALUES; - - /** - * Associates {@code key} with {@code value} if not already present - * @param key - * @param value - * @return {@code true} if added, {@code false} if already present - */ - @Deprecated - public boolean put(short key, @Nonnull RecordId value) { - if (keys.length == 0) { - keys = new short[1]; - values = new RecordId[1]; - keys[0] = key; - values[0] = value; - return true; - } else { - int k = binarySearch(keys, key); - if (k < 0) { - int l = -k - 1; - short[] newKeys = new short[keys.length + 1]; - RecordId[] newValues = new RecordId[(values.length + 1)]; - arraycopy(keys, 0, newKeys, 0, l); - arraycopy(values, 0, newValues, 0, l); - newKeys[l] = key; - newValues[l] = value; - int c = keys.length - l; - if (c > 0) { - arraycopy(keys, l, newKeys, l + 1, c); - arraycopy(values, l, newValues, l + 1, c); - } - keys = newKeys; - values = newValues; - return true; - } else { - return false; - } - } - } - - /** - * Returns the value associated with a given {@code key} or {@code null} if none. - * @param key the key to retrieve - * @return the value associated with a given {@code key} or {@code null} if none. 
- */ - @CheckForNull - @Deprecated - public RecordId get(short key) { - int k = binarySearch(keys, key); - if (k >= 0) { - return values[k]; - } else { - return null; - } - } - - /** - * Check whether {@code key} is present is this map. - * @param key the key to check for - * @return {@code true} iff {@code key} is present. - */ - @Deprecated - public boolean containsKey(short key) { - return binarySearch(keys, key) >= 0; - } - - /** - * @return the number of keys in this map - */ - @Deprecated - public int size() { - return keys.length; - } - - /** - * Retrieve the key at a given index. Keys are ordered according - * the natural ordering of shorts. - * @param index - * @return the key at {@code index} - * @throws ArrayIndexOutOfBoundsException if not {@code 0 <= index < size()} - */ - @Deprecated - public short getKey(int index) { - return keys[index]; - } - - /** - * Retrieve the value at a given index. Keys are ordered according - * the natural ordering of shorts. - * @param index - * @return the value at {@code index} - * @throws ArrayIndexOutOfBoundsException if not {@code 0 <= index < size()} - */ - @Nonnull - @Deprecated - public RecordId getRecordId(int index) { - return values[index]; - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdSet.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdSet.java deleted file mode 100644 index e4845a9..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdSet.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static java.lang.System.arraycopy; -import static java.util.Arrays.binarySearch; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ALIGN_BITS; - -import java.util.Map; - -/** - * A memory optimised set of {@link RecordId}s. - * - * The set doesn't keep references to the actual record ids - * it contains. - */ -@Deprecated -public class RecordIdSet { - private final Map seenIds = newHashMap(); - - /** - * Add {@code id} to this set if not already present - * @param id the record id to add - * @return {@code true} if added, {@code false} if already present - */ - @Deprecated - public boolean addIfNotPresent(RecordId id) { - String segmentId = id.getSegmentId().toString(); - ShortSet offsets = seenIds.get(segmentId); - if (offsets == null) { - offsets = new ShortSet(); - seenIds.put(segmentId, offsets); - } - return offsets.add(crop(id.getOffset())); - } - - /** - * Check whether {@code id} is present is this set. - * @param id the record id to check for - * @return {@code true} iff {@code id} is present. 
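Both RecordIdMap above and RecordIdSet.ShortSet below keep sorted parallel arrays and lean on the Arrays.binarySearch contract: a negative return value encodes the insertion point as -(insertionPoint) - 1, which is where the `int l = -k - 1` in put() comes from. A small demonstration:

    import java.util.Arrays;
    import static java.util.Arrays.binarySearch;

    class SortedInsertDemo {
        public static void main(String[] args) {
            short[] keys = {10, 20, 40};
            int k = binarySearch(keys, (short) 30);
            System.out.println(k);               // -3: 30 is absent
            int l = -k - 1;                      // insertion point, as in put() above
            System.out.println(l);               // 2: 30 belongs in front of 40

            short[] grown = new short[keys.length + 1];
            System.arraycopy(keys, 0, grown, 0, l);
            grown[l] = 30;
            System.arraycopy(keys, l, grown, l + 1, keys.length - l);
            System.out.println(Arrays.toString(grown)); // [10, 20, 30, 40]
        }
    }

Each put() grows the arrays by one, so inserts are O(n); the structure trades insert cost for minimal heap overhead, which is the point of the class.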
- */ - @Deprecated - public boolean contains(RecordId id) { - String segmentId = id.getSegmentId().toString(); - ShortSet offsets = seenIds.get(segmentId); - return offsets != null && offsets.contains(crop(id.getOffset())); - } - - private static short crop(int value) { - return (short) (value >> RECORD_ALIGN_BITS); - } - - static class ShortSet { - short[] elements; - - boolean add(short n) { - if (elements == null) { - elements = new short[1]; - elements[0] = n; - return true; - } else { - int k = binarySearch(elements, n); - if (k < 0) { - int l = -k - 1; - short[] e = new short[elements.length + 1]; - arraycopy(elements, 0, e, 0, l); - e[l] = n; - int c = elements.length - l; - if (c > 0) { - arraycopy(elements, l, e, l + 1, c); - } - elements = e; - return true; - } else { - return false; - } - } - } - - boolean contains(short n) { - return elements != null && binarySearch(elements, n) >= 0; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordType.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordType.java deleted file mode 100644 index a3e4f33..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordType.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -/** - * The type of a record in a segment. - */ -@Deprecated -public enum RecordType { - - /** - * A leaf of a map (which is a HAMT tree). This contains - *
<ul> - * <li>the size (int)</li> - * <li>for each entry, the hash code of the key (4 bytes), then the record id of - * the key and the record id of the value</li> - * </ul> - */ - @Deprecated - LEAF, -
- /** - * A branch of a map (which is a HAMT tree). This contains - * <ul> - * <li>level within the HAMT structure (4 most significant bits), plus size - * of that branch of the map</li> - * <li>bitmap (4 bytes)</li> - * <li>record ids of the buckets of the next level of the map</li> - * </ul> - * There is a special case: if the first int (level/size) is -1, then it's a - * diff record, to handle the common case of when exactly one existing child - * node was modified. This is common because whenever one node was changed, - * we need to propagate that up to the root. - * <ul> - * <li>-1 (int)</li> - * <li>hash code of the key that was changed (4 bytes)</li> - * <li>the record id of the key</li> - * <li>the record id of the value</li> - * <li>the record id of the (base version of the) modified map</li> - * </ul> - * There is only ever one single diff record for a map. - */ - @Deprecated - BRANCH, -
- /** - * A bucket (a list of references). It always includes at least 2 elements, - * up to 255 entries (because each entry could in theory point to a - * different segment, in which case this couldn't be stored in a segment). - * This contains just the record ids. The size of the list is not stored, as - * it is stored along with the reference to this record. - */ - @Deprecated - BUCKET, -
- /** - * A list including the size (an int). This could be 0, in which case there - * is no reference. If the size is 1, then the reference points to the value of - * the list. If the size is larger, then a record id follows, which points - * to a bucket with the actual record ids. If there are more than 255 - * entries in the list, then the list is partitioned into sublists of 255 - * entries each, which are stored kind of recursively. - */ - @Deprecated - LIST, -
- /** - * A value (for example a string, or a long, or a blob). The format is: - * length (variable length encoding, one byte if shorter than 128, else more - * bytes), then the data as a byte array, or, for large values, a record id - * of the top level bucket that contains the list of block record ids of the - * actual binary data. - * <p> - * Therefore, a value can reference other records. - */ - @Deprecated - VALUE, -
- /** - * A block of bytes (a binary value, or a part of a binary value, or part of - * large strings). It only contains the raw data. - */ - @Deprecated - BLOCK, -
- /** - * A template (the "hidden class" of a node; inspired by the Chrome V8 - * Javascript engine). This includes a list of property templates. Format: - * <ul> - * <li>head (int), which is: 1 bit (most significant one) whether the node - * has a single valued jcr:primaryType property. 1 bit whether it has - * mixins, in which case 10 bits (27 to 18) are used for the number of - * mixins. 1 bit whether the node has no child nodes. 1 bit whether the node - * has more than one child node. 18 bits (0 to 17) the number of properties - * (0 to 262143).</li> - * <li>The record ids of: if needed, record id of the primary type (a - * value), record ids of the mixin names (value records), for single child - * node: the name of the child node</li> - * <li>The list of record ids of property names (which are stored before the - * template in separate value records), and the property type (negative - * values for multi-value properties).</li> - * </ul> - */ - @Deprecated - TEMPLATE, -
- /** - * A JCR node, which contains a list of record ids: - * <ul> - * <li>the record id of the template</li> - * <li>depending on the template, the record id of the map of the ids of the - * child node name(s) and child node record id(s), or if there is just one - * child node, the child node record id</li> - * <li>the record ids of the property values (for multi-valued property a - * pointer to the list record)</li> - * </ul>
- */ - @Deprecated - NODE - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyser.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyser.java deleted file mode 100644 index 32c81a6..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyser.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Sets.newHashSet; -import static org.apache.commons.io.FileUtils.byteCountToDisplaySize; - -import java.util.Formatter; -import java.util.Set; - -/** - * This utility breaks down space usage per record type. - * It accounts for value sharing. That is, an instance - * of this class will remember which records it has seen - * already and not count those again. Only the effective - * space taken by the records is taken into account. Slack - * space from aligning records is not accounted for. - */ -@Deprecated -public class RecordUsageAnalyser extends SegmentParser { - private final RecordIdSet seenIds = new RecordIdSet(); - private final Set deadLinks = newHashSet(); - - private long mapSize; // leaf and branch - private long listSize; // list and bucket - private long valueSize; // inlined values - private long templateSize; // template - private long nodeSize; // node - - private long mapCount; - private long listCount; - private long propertyCount; - private long smallBlobCount; - private long mediumBlobCount; - private long longBlobCount; - private long externalBlobCount; - private long smallStringCount; - private long mediumStringCount; - private long longStringCount; - private long templateCount; - private long nodeCount; - - /** - * @return number of bytes in {@link RecordType#LEAF leaf} and {@link RecordType#BRANCH branch} - * records. - */ - @Deprecated - public long getMapSize() { - return mapSize; - } - - /** - * @return number of bytes in {@link RecordType#LIST list} and {@link RecordType#BUCKET bucket} - * records. - */ - @Deprecated - public long getListSize() { - return listSize; - } - - /** - * @return number of bytes in inlined values (strings and blobs) - */ - @Deprecated - public long getValueSize() { - return valueSize; - } - - /** - * @return number of bytes in {@link RecordType#TEMPLATE template} records. - */ - @Deprecated - public long getTemplateSize() { - return templateSize; - } - - /** - * @return number of bytes in {@link RecordType#NODE node} records. 
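Stepping back to the TEMPLATE layout listed above: the head int packs five fields, and they unpack with plain shifts and masks. A worked decode under the documented bit positions (a standalone illustration, not the removed parser):

    // Decodes the TEMPLATE head int as documented above:
    // bit 31: single valued jcr:primaryType, bit 30: has mixins,
    // bit 29: zero child nodes, bit 28: more than one child node,
    // bits 27..18: mixin count, bits 17..0: property count.
    class TemplateHeadDemo {
        public static void main(String[] args) {
            int head = (1 << 31)   // has jcr:primaryType
                     | (1 << 30)   // has mixins ...
                     | (3 << 18)   // ... three of them
                     | (1 << 29)   // no child nodes
                     | 42;         // 42 properties

            boolean hasPrimaryType = (head & (1 << 31)) != 0;
            boolean hasMixins      = (head & (1 << 30)) != 0;
            boolean zeroChildren   = (head & (1 << 29)) != 0;
            boolean manyChildren   = (head & (1 << 28)) != 0;
            int mixinCount    = (head >>> 18) & ((1 << 10) - 1);
            int propertyCount = head & ((1 << 18) - 1);

            System.out.println(hasPrimaryType + " " + hasMixins + " " + zeroChildren
                    + " " + manyChildren + " " + mixinCount + " " + propertyCount);
            // prints: true true true false 3 42
        }
    }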
- */ - @Deprecated - public long getNodeSize() { - return nodeSize; - } - - /** - * @return number of maps - */ - @Deprecated - public long getMapCount() { - return mapCount; - } - - /** - * @return number of lists - */ - @Deprecated - public long getListCount() { - return listCount; - } - - /** - * @return number of properties - */ - @Deprecated - public long getPropertyCount() { - return propertyCount; - } - - /** - * @return number of {@link Segment#SMALL_LIMIT small} blobs. - * - */ - @Deprecated - public long getSmallBlobCount() { - return smallBlobCount; - } - - /** - * @return number of {@link Segment#MEDIUM_LIMIT medium} blobs. - * - */ - @Deprecated - public long getMediumBlobCount() { - return mediumBlobCount; - } - - /** - * @return number of long blobs. - * - */ - @Deprecated - public long getLongBlobCount() { - return longBlobCount; - } - - /** - * @return number of external blobs. - * - */ - @Deprecated - public long getExternalBlobCount() { - return externalBlobCount; - } - - /** - * @return number of {@link Segment#SMALL_LIMIT small} strings. - * - */ - @Deprecated - public long getSmallStringCount() { - return smallStringCount; - } - - /** - * @return number of {@link Segment#MEDIUM_LIMIT medium} strings. - * - */ - @Deprecated - public long getMediumStringCount() { - return mediumStringCount; - } - - /** - * @return number of long strings. - * - */ - @Deprecated - public long getLongStringCount() { - return longStringCount; - } - - /** - * @return number of templates. - */ - @Deprecated - public long getTemplateCount() { - return templateCount; - } - - /** - * @return number of nodes. - */ - @Deprecated - public long getNodeCount() { - return nodeCount; - } - - @Deprecated - public void analyseNode(RecordId nodeId) { - onNode(null, nodeId); - } - - @Override - @Deprecated - public String toString() { - StringBuilder sb = new StringBuilder(); - @SuppressWarnings("resource") - Formatter formatter = new Formatter(sb); - formatter.format( - "%s in maps (%s leaf and branch records)%n", - byteCountToDisplaySize(mapSize), mapCount); - formatter.format( - "%s in lists (%s list and bucket records)%n", - byteCountToDisplaySize(listSize), listCount); - formatter.format( - "%s in values (value and block records of %s properties, " + - "%s/%s/%s/%s small/medium/long/external blobs, %s/%s/%s small/medium/long strings)%n", - byteCountToDisplaySize(valueSize), propertyCount, - smallBlobCount, mediumBlobCount, longBlobCount, externalBlobCount, - smallStringCount, mediumStringCount, longStringCount); - formatter.format( - "%s in templates (%s template records)%n", - byteCountToDisplaySize(templateSize), templateCount); - formatter.format( - "%s in nodes (%s node records)%n", - byteCountToDisplaySize(nodeSize), nodeCount); - formatter.format("links to non existing segments: %s", deadLinks); - return sb.toString(); - } - - @Override - protected void onNode(RecordId parentId, RecordId nodeId) { - try { - if (seenIds.addIfNotPresent(nodeId)) { - NodeInfo info = parseNode(nodeId); - this.nodeCount++; - this.nodeSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onTemplate(RecordId parentId, RecordId templateId) { - try { - if (seenIds.addIfNotPresent(templateId)) { - TemplateInfo info = parseTemplate(templateId); - this.templateCount++; - this.templateSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void 
onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - try { - if (seenIds.addIfNotPresent(mapId)) { - MapInfo info = parseMapDiff(mapId, map); - this.mapCount++; - this.mapSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - try { - if (seenIds.addIfNotPresent(mapId)) { - MapInfo info = parseMapLeaf(mapId, map); - this.mapCount++; - this.mapSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - try { - if (seenIds.addIfNotPresent(mapId)) { - MapInfo info = parseMapBranch(mapId, map); - this.mapCount++; - this.mapSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - try { - if (!seenIds.contains(propertyId)) { - PropertyInfo info = parseProperty(parentId, propertyId, template); - this.propertyCount++; - this.valueSize += info.size; - seenIds.addIfNotPresent(propertyId); - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - try { - if (seenIds.addIfNotPresent(blobId)) { - BlobInfo info = parseBlob(blobId); - this.valueSize += info.size; - switch (info.blobType) { - case SMALL: - this.smallBlobCount++; - break; - case MEDIUM: - this.mediumBlobCount++; - break; - case LONG: - this.longBlobCount++; - break; - case EXTERNAL: - this.externalBlobCount++; - break; - } - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onString(RecordId parentId, RecordId stringId) { - try { - if (seenIds.addIfNotPresent(stringId)) { - BlobInfo info = parseString(stringId); - this.valueSize += info.size; - switch (info.blobType) { - case SMALL: - this.smallStringCount++; - break; - case MEDIUM: - this.mediumStringCount++; - break; - case LONG: - this.longStringCount++; - break; - case EXTERNAL: - throw new IllegalStateException("String is too long: " + info.size); - } - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - try { - if (seenIds.addIfNotPresent(listId)) { - ListInfo info = parseList(parentId, listId, count); - this.listCount++; - this.listSize += info.size; - } - } catch (SegmentNotFoundException snfe) { - deadLinks.add(snfe.getSegmentId()); - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordWriters.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordWriters.java deleted file mode 100644 index bf27479..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordWriters.java +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
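Every callback in the analyser above follows the same accounting discipline: check the seen-set first so records shared between nodes are counted once, and route SegmentNotFoundException into deadLinks instead of failing the scan. The skeleton of that pattern, reduced to one counter (names illustrative):

    import java.util.HashSet;
    import java.util.Set;

    // Schematic of RecordUsageAnalyser's per-callback bookkeeping.
    class UsageTally {
        private final Set<String> seen = new HashSet<>();
        private long nodeCount;
        private long nodeSize;

        void onNode(String recordId, long size) {
            if (seen.add(recordId)) {   // addIfNotPresent in the code above
                nodeCount++;
                nodeSize += size;
            }
        }

        public static void main(String[] args) {
            UsageTally tally = new UsageTally();
            tally.onNode("a", 10);
            tally.onNode("a", 10);      // shared record: counted once
            tally.onNode("b", 5);
            System.out.println(tally.nodeCount + " nodes, " + tally.nodeSize + " bytes");
            // prints: 2 nodes, 15 bytes
        }
    }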
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static java.util.Arrays.sort; -import static java.util.Collections.singleton; -import static org.apache.jackrabbit.oak.plugins.segment.MapRecord.SIZE_BITS; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.BLOCK; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.BRANCH; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.BUCKET; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.LEAF; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.LIST; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.NODE; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.TEMPLATE; -import static org.apache.jackrabbit.oak.plugins.segment.RecordType.VALUE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -final class RecordWriters { - private RecordWriters() {} - - /** - * Base class for all record writers - */ - public abstract static class RecordWriter { - private final RecordType type; - protected final int size; - protected final Collection ids; - - protected RecordWriter(RecordType type, int size, - Collection ids) { - this.type = type; - this.size = size; - this.ids = ids; - } - - protected RecordWriter(RecordType type, int size, RecordId id) { - this(type, size, singleton(id)); - } - - protected RecordWriter(RecordType type, int size) { - this(type, size, Collections. 
emptyList()); - } - - public final T write(SegmentBufferWriter writer) throws IOException { - RecordId id = writer.prepare(type, size, ids); - return writeRecordContent(id, writer); - } - - protected abstract T writeRecordContent(RecordId id, - SegmentBufferWriter writer); - } - - public static RecordWriter newMapLeafWriter(int level, Collection entries) { - return new MapLeafWriter(level, entries); - } - - public static RecordWriter newMapLeafWriter() { - return new MapLeafWriter(); - } - - public static RecordWriter newMapBranchWriter(int level, int entryCount, int bitmap, List ids) { - return new MapBranchWriter(level, entryCount, bitmap, ids); - } - - public static RecordWriter newMapBranchWriter(int bitmap, List ids) { - return new MapBranchWriter(bitmap, ids); - } - - public static RecordWriter newListWriter(int count, RecordId lid) { - return new ListWriter(count, lid); - } - - public static RecordWriter newListWriter() { - return new ListWriter(); - } - - public static RecordWriter newListBucketWriter(List ids) { - return new ListBucketWriter(ids); - } - - public static RecordWriter newBlockWriter(byte[] bytes, int offset, int length) { - return new BlockWriter(bytes, offset, length); - } - - public static RecordWriter newValueWriter(RecordId rid, long len) { - return new SingleValueWriter(rid, len); - } - - public static RecordWriter newValueWriter(int length, byte[] data) { - return new ArrayValueWriter(length, data); - } - - /** - * Write a large blob ID. A blob ID is considered large if the length of its - * binary representation is equal to or greater than {@code - * Segment.BLOB_ID_SMALL_LIMIT}. - */ - public static RecordWriter newBlobIdWriter(RecordId rid) { - return new LargeBlobIdWriter(rid); - } - - /** - * Write a small blob ID. A blob ID is considered small if the length of its - * binary representation is less than {@code Segment.BLOB_ID_SMALL_LIMIT}. - */ - public static RecordWriter newBlobIdWriter(byte[] blobId) { - return new SmallBlobIdWriter(blobId); - } - - public static RecordWriter newTemplateWriter(Collection ids, - RecordId[] propertyNames, byte[] propertyTypes, int head, RecordId primaryId, - List mixinIds, RecordId childNameId, RecordId propNamesId, - SegmentVersion version) { - return new TemplateWriter(ids, propertyNames, propertyTypes, head, primaryId, mixinIds, - childNameId, propNamesId, version); - } - - public static RecordWriter newNodeStateWriter(List ids) { - return new NodeStateWriter(ids); - } - - /** - * Map Leaf record writer. 
- * @see RecordType#LEAF - */ - private static class MapLeafWriter extends RecordWriter { - private final int level; - private final Collection entries; - - private MapLeafWriter() { - super(LEAF, 4); - this.level = -1; - this.entries = null; - } - - private MapLeafWriter(int level, Collection entries) { - super(LEAF, 4 + entries.size() * 4, extractIds(entries)); - this.level = level; - this.entries = entries; - } - - private static List extractIds(Collection entries) { - List ids = newArrayListWithCapacity(2 * entries.size()); - for (MapEntry entry : entries) { - ids.add(entry.getKey()); - ids.add(entry.getValue()); - } - return ids; - } - - @Override - protected MapRecord writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - if (entries != null) { - int size = entries.size(); - writer.writeInt((level << SIZE_BITS) | size); - - // copy the entries to an array so we can sort them before - // writing - MapEntry[] array = entries.toArray(new MapEntry[size]); - sort(array); - - for (MapEntry entry : array) { - writer.writeInt(entry.getHash()); - } - for (MapEntry entry : array) { - writer.writeRecordId(entry.getKey()); - writer.writeRecordId(entry.getValue()); - } - } else { - writer.writeInt(0); - } - return new MapRecord(id); - } - } - - /** - * Map Branch record writer. - * @see RecordType#BRANCH - */ - private static class MapBranchWriter extends RecordWriter { - private final int level; - private final int entryCount; - private final int bitmap; - - /* - * Write a regular map branch - */ - private MapBranchWriter(int level, int entryCount, int bitmap, List ids) { - super(BRANCH, 8, ids); - this.level = level; - this.entryCount = entryCount; - this.bitmap = bitmap; - } - - /* - * Write a diff map - */ - private MapBranchWriter(int bitmap, List ids) { - // level = 0 and and entryCount = -1 -> this is a map diff - this(0, -1, bitmap, ids); - } - - @Override - protected MapRecord writeRecordContent(RecordId id, SegmentBufferWriter writer) { - // -1 to encode a map diff (if level == 0 and entryCount == -1) - writer.writeInt((level << SIZE_BITS) | entryCount); - writer.writeInt(bitmap); - for (RecordId mapId : ids) { - writer.writeRecordId(mapId); - } - return new MapRecord(id); - } - } - - /** - * List record writer. - * @see RecordType#LIST - */ - private static class ListWriter extends RecordWriter { - private final int count; - private final RecordId lid; - - private ListWriter() { - super(LIST, 4); - count = 0; - lid = null; - } - - private ListWriter(int count, RecordId lid) { - super(LIST, 4, lid); - this.count = count; - this.lid = lid; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - writer.writeInt(count); - if (lid != null) { - writer.writeRecordId(lid); - } - return id; - } - } - - /** - * List Bucket record writer. - * - * @see RecordType#BUCKET - */ - private static class ListBucketWriter extends RecordWriter { - - private ListBucketWriter(List ids) { - super(BUCKET, 0, ids); - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - for (RecordId bucketId : ids) { - writer.writeRecordId(bucketId); - } - return id; - } - } - - /** - * Block record writer. 
- * @see SegmentWriter#writeBlock - * @see RecordType#BLOCK - */ - private static class BlockWriter extends RecordWriter { - private final byte[] bytes; - private final int offset; - - private BlockWriter(byte[] bytes, int offset, int length) { - super(BLOCK, length); - this.bytes = bytes; - this.offset = offset; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - writer.writeBytes(bytes, offset, size); - return id; - } - } - - /** - * Single RecordId record writer. - * @see SegmentWriter#writeValueRecord - * @see RecordType#VALUE - */ - private static class SingleValueWriter extends RecordWriter { - private final RecordId rid; - private final long len; - - private SingleValueWriter(RecordId rid, long len) { - super(VALUE, 8, rid); - this.rid = rid; - this.len = len; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - writer.writeLong(len); - writer.writeRecordId(rid); - return id; - } - } - - /** - * Bye array record writer. Used as a special case for short binaries (up to - * about {@code Segment#MEDIUM_LIMIT}): store them directly as small or - * medium-sized value records. - * @see SegmentWriter#writeValueRecord - * @see Segment#MEDIUM_LIMIT - * @see RecordType#VALUE - */ - private static class ArrayValueWriter extends RecordWriter { - private final int length; - private final byte[] data; - - private ArrayValueWriter(int length, byte[] data) { - super(VALUE, length + getSizeDelta(length)); - this.length = length; - this.data = data; - } - - private static boolean isSmallSize(int length) { - return length < SMALL_LIMIT; - } - - private static int getSizeDelta(int length) { - if (isSmallSize(length)) { - return 1; - } else { - return 2; - } - } - - @Override - protected RecordId writeRecordContent(RecordId id, SegmentBufferWriter writer) { - if (isSmallSize(length)) { - writer.writeByte((byte) length); - } else { - writer.writeShort((short) ((length - SMALL_LIMIT) | 0x8000)); - } - writer.writeBytes(data, 0, length); - return id; - } - } - - /** - * Large Blob record writer. A blob ID is considered large if the length of - * its binary representation is equal to or greater than - * {@code Segment#BLOB_ID_SMALL_LIMIT}. - * - * @see Segment#BLOB_ID_SMALL_LIMIT - * @see RecordType#VALUE - */ - private static class LargeBlobIdWriter extends RecordWriter { - private final RecordId stringRecord; - - private LargeBlobIdWriter(RecordId stringRecord) { - super(VALUE, 1, stringRecord); - this.stringRecord = stringRecord; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - // The length uses a fake "length" field that is always equal to - // 0xF0. - // This allows the code to take apart small from a large blob IDs. - writer.writeByte((byte) 0xF0); - writer.writeRecordId(stringRecord); - writer.addBlobRef(id); - return id; - } - } - - /** - * Small Blob record writer. A blob ID is considered small if the length of - * its binary representation is less than {@code Segment#BLOB_ID_SMALL_LIMIT}. 
- - * @see Segment#BLOB_ID_SMALL_LIMIT - * @see RecordType#VALUE - */ - private static class SmallBlobIdWriter extends RecordWriter { - private final byte[] blobId; - - private SmallBlobIdWriter(byte[] blobId) { - super(VALUE, 2 + blobId.length); - checkArgument(blobId.length < Segment.BLOB_ID_SMALL_LIMIT); - this.blobId = blobId; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - int length = blobId.length; - writer.writeShort((short) (length | 0xE000)); - writer.writeBytes(blobId, 0, length); - writer.addBlobRef(id); - return id; - } - } - - /** - * Template record writer. - * @see RecordType#TEMPLATE - */ - private static class TemplateWriter extends RecordWriter { - private final RecordId[] propertyNames; - private final byte[] propertyTypes; - private final int head; - private final RecordId primaryId; - private final List mixinIds; - private final RecordId childNameId; - private final RecordId propNamesId; - private final SegmentVersion version; - - private TemplateWriter(Collection ids, RecordId[] propertyNames, - byte[] propertyTypes, int head, RecordId primaryId, List mixinIds, - RecordId childNameId, RecordId propNamesId, SegmentVersion version) { - super(TEMPLATE, 4 + propertyTypes.length, ids); - this.propertyNames = propertyNames; - this.propertyTypes = propertyTypes; - this.head = head; - this.primaryId = primaryId; - this.mixinIds = mixinIds; - this.childNameId = childNameId; - this.propNamesId = propNamesId; - this.version = version; - } - - @Override - protected RecordId writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - writer.writeInt(head); - if (primaryId != null) { - writer.writeRecordId(primaryId); - } - if (mixinIds != null) { - for (RecordId mixinId : mixinIds) { - writer.writeRecordId(mixinId); - } - } - if (childNameId != null) { - writer.writeRecordId(childNameId); - } - if (version.onOrAfter(V_11)) { - if (propNamesId != null) { - writer.writeRecordId(propNamesId); - } - } - for (int i = 0; i < propertyNames.length; i++) { - if (!version.onOrAfter(V_11)) { - // V10 only - writer.writeRecordId(propertyNames[i]); - } - writer.writeByte(propertyTypes[i]); - } - return id; - } - } - - /** - * Node State record writer. - * @see RecordType#NODE - */ - private static class NodeStateWriter extends RecordWriter { - private NodeStateWriter(List ids) { - super(NODE, 0, ids); - } - - @Override - protected SegmentNodeState writeRecordContent(RecordId id, - SegmentBufferWriter writer) { - for (RecordId recordId : ids) { - writer.writeRecordId(recordId); - } - return new SegmentNodeState(id); - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java deleted file mode 100644 index 40b3aa1..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Segment.java +++ /dev/null @@ -1,759 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkPositionIndexes; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Maps.newConcurrentMap; -import static java.lang.Boolean.getBoolean; -import static org.apache.jackrabbit.oak.commons.IOUtils.closeQuietly; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentWriter.BLOCK_SIZE; - -import java.io.IOException; -import java.io.OutputStream; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.nio.ByteBuffer; -import java.nio.channels.Channels; -import java.nio.channels.WritableByteChannel; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ConcurrentMap; - -import javax.annotation.CheckForNull; -import javax.annotation.Nullable; - -import com.google.common.base.Charsets; -import com.google.common.base.Function; -import org.apache.commons.io.HexDump; -import org.apache.commons.io.output.ByteArrayOutputStream; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; -import org.apache.jackrabbit.oak.plugins.memory.PropertyStates; - -/** - * A list of records. - *
-     * <p>
-     * Record data is not kept in memory, but some entries are cached (templates,
-     * all strings in the segment).
-     * <p>
- * This class includes method to read records from the raw bytes. - */ -@Deprecated -public class Segment { - - /** - * Number of bytes used for storing a record identifier. One byte - * is used for identifying the segment and two for the record offset - * within that segment. - */ - static final int RECORD_ID_BYTES = 1 + 2; - - /** - * The limit on segment references within one segment. Since record - * identifiers use one byte to indicate the referenced segment, a single - * segment can hold references to up to 255 segments plus itself. - */ - static final int SEGMENT_REFERENCE_LIMIT = (1 << 8) - 1; // 255 - - /** - * The number of bytes (or bits of address space) to use for the - * alignment boundary of segment records. - */ - @Deprecated - public static final int RECORD_ALIGN_BITS = 2; // align at the four-byte boundary - - /** - * Maximum segment size. Record identifiers are stored as three-byte - * sequences with the first byte indicating the segment and the next - * two the offset within that segment. Since all records are aligned - * at four-byte boundaries, the two bytes can address up to 256kB of - * record data. - */ - @Deprecated - public static final int MAX_SEGMENT_SIZE = 1 << (16 + RECORD_ALIGN_BITS); // 256kB - - /** - * The size limit for small values. The variable length of small values - * is encoded as a single byte with the high bit as zero, which gives us - * seven bits for encoding the length of the value. - */ - static final int SMALL_LIMIT = 1 << 7; - - /** - * The size limit for medium values. The variable length of medium values - * is encoded as two bytes with the highest bits of the first byte set to - * one and zero, which gives us 14 bits for encoding the length of the - * value. And since small values are never stored as medium ones, we can - * extend the size range to cover that many longer values. - */ - @Deprecated - public static final int MEDIUM_LIMIT = (1 << (16 - 2)) + SMALL_LIMIT; - - /** - * Maximum size of small blob IDs. A small blob ID is stored in a value - * record whose length field contains the pattern "1110" in its most - * significant bits. Since two bytes are used to store both the bit pattern - * and the actual length of the blob ID, a maximum of 2^12 values can be - * stored in the length field. - */ - @Deprecated - public static final int BLOB_ID_SMALL_LIMIT = 1 << 12; - - @Deprecated - public static final int REF_COUNT_OFFSET = 5; - - static final int ROOT_COUNT_OFFSET = 6; - - static final int BLOBREF_COUNT_OFFSET = 8; - - private final SegmentTracker tracker; - - private final SegmentId id; - - private final ByteBuffer data; - - /** - * Version of the segment storage format. - */ - private final SegmentVersion version; - - /** - * Referenced segment identifiers. Entries are initialized lazily in - * {@link #getRefId(int)}. Set to {@code null} for bulk segments. - */ - private final SegmentId[] refids; - - /** - * String records read from segment. Used to avoid duplicate - * copies and repeated parsing of the same strings. - * - * @deprecated Superseded by {@link #stringCache} unless - * {@link SegmentTracker#DISABLE_STRING_CACHE} is {@code true}. 
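Side note for review: the encode/decode/align trio deleted above is easiest to verify with concrete numbers. A sketch under the assumption RECORD_ALIGN_BITS == 2, exactly as declared above; not part of the patch.

    class OffsetPackingSketch {
        static final int RECORD_ALIGN_BITS = 2; // four-byte alignment, as above

        public static void main(String[] args) {
            int offset = 0x3FFFC;                                   // highest aligned offset in a 256kB segment
            short packed = (short) (offset >> RECORD_ALIGN_BITS);   // encode(): drops the two alignment bits -> 0xFFFF
            int restored = (packed & 0xffff) << RECORD_ALIGN_BITS;  // decode(): restores 0x3FFFC
            int aligned = (13 + 4 - 1) & ~(4 - 1);                  // align(13, 4) == 16
            System.out.printf("packed=%04x restored=%05x aligned=%d%n",
                    packed & 0xffff, restored, aligned);
        }
    }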
- */ - @Deprecated - private final ConcurrentMap strings; - - private final Function loadString = new Function() { - @Nullable - @Override - public String apply(Integer offset) { - return loadString(offset); - } - }; - - /** - * Cache for string records or {@code null} if {@link #strings} is used for caching - */ - private final StringCache stringCache; - - /** - * Template records read from segment. Used to avoid duplicate - * copies and repeated parsing of the same templates. - */ - private final ConcurrentMap templates; - - private static final boolean DISABLE_TEMPLATE_CACHE = getBoolean("oak.segment.disableTemplateCache"); - - private volatile long accessed; - - /** - * Decode a 4 byte aligned segment offset. - * @param offset 4 byte aligned segment offset - * @return decoded segment offset - */ - @Deprecated - public static int decode(short offset) { - return (offset & 0xffff) << RECORD_ALIGN_BITS; - } - - /** - * Encode a segment offset into a 4 byte aligned address packed into a {@code short}. - * @param offset segment offset - * @return encoded segment offset packed into a {@code short} - */ - @Deprecated - public static short encode(int offset) { - return (short) (offset >> RECORD_ALIGN_BITS); - } - - /** - * Align an {@code address} on the given {@code boundary} - * - * @param address address to align - * @param boundary boundary to align to - * @return {@code n = address + a} such that {@code n % boundary == 0} and - * {@code 0 <= a < boundary}. - */ - @Deprecated - public static int align(int address, int boundary) { - return (address + boundary - 1) & ~(boundary - 1); - } - - @Deprecated - public Segment(SegmentTracker tracker, SegmentId id, ByteBuffer data) { - this(tracker, id, data, V_11); - } - - @Deprecated - public Segment(SegmentTracker tracker, final SegmentId id, final ByteBuffer data, SegmentVersion version) { - this.tracker = checkNotNull(tracker); - this.id = checkNotNull(id); - if (tracker.getStringCache() == null) { - strings = newConcurrentMap(); - stringCache = null; - } else { - strings = null; - stringCache = tracker.getStringCache(); - } - if (DISABLE_TEMPLATE_CACHE) { - templates = null; - } else { - templates = newConcurrentMap(); - } - this.data = checkNotNull(data); - if (id.isDataSegmentId()) { - byte segmentVersion = data.get(3); - checkState(data.get(0) == '0' - && data.get(1) == 'a' - && data.get(2) == 'K' - && SegmentVersion.isValid(segmentVersion), - new Object() { // Defer evaluation of error message - @Override - public String toString() { - return "Invalid segment format. 
Dumping segment " + id + "\n" - + toHex(data.array()); - } - }); - this.refids = new SegmentId[getRefCount()]; - this.refids[0] = id; - this.version = SegmentVersion.fromByte(segmentVersion); - } else { - this.refids = null; - this.version = version; - } - } - - private static String toHex(byte[] bytes) { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - try { - HexDump.dump(bytes, 0, out, 0); - return out.toString(Charsets.UTF_8.name()); - } catch (IOException e) { - return "Error dumping segment: " + e.getMessage(); - } finally { - closeQuietly(out); - } - } - - Segment(SegmentTracker tracker, byte[] buffer) { - this.tracker = checkNotNull(tracker); - this.id = tracker.newDataSegmentId(); - if (tracker.getStringCache() == null) { - strings = newConcurrentMap(); - stringCache = null; - } else { - strings = null; - stringCache = tracker.getStringCache(); - } - if (DISABLE_TEMPLATE_CACHE) { - templates = null; - } else { - templates = newConcurrentMap(); - } - - this.data = ByteBuffer.wrap(checkNotNull(buffer)); - this.refids = new SegmentId[SEGMENT_REFERENCE_LIMIT + 1]; - this.refids[0] = id; - this.version = SegmentVersion.fromByte(buffer[3]); - this.id.setSegment(this); - } - - SegmentVersion getSegmentVersion() { - return version; - } - - /** - * Maps the given record offset to the respective position within the - * internal {@link #data} array. The validity of a record with the given - * length at the given offset is also verified. - * - * @param offset record offset - * @param length record length - * @return position within the data array - */ - private int pos(int offset, int length) { - checkPositionIndexes(offset, offset + length, MAX_SEGMENT_SIZE); - int pos = data.limit() - MAX_SEGMENT_SIZE + offset; - checkState(pos >= data.position()); - return pos; - } - - @Deprecated - public SegmentId getSegmentId() { - return id; - } - - int getRefCount() { - return (data.get(REF_COUNT_OFFSET) & 0xff) + 1; - } - - @Deprecated - public int getRootCount() { - return data.getShort(ROOT_COUNT_OFFSET) & 0xffff; - } - - @Deprecated - public RecordType getRootType(int index) { - int refCount = getRefCount(); - checkArgument(index < getRootCount()); - return RecordType.values()[data.get(data.position() + refCount * 16 + index * 3) & 0xff]; - } - - @Deprecated - public int getRootOffset(int index) { - int refCount = getRefCount(); - checkArgument(index < getRootCount()); - return (data.getShort(data.position() + refCount * 16 + index * 3 + 1) & 0xffff) - << RECORD_ALIGN_BITS; - } - - /** - * Returns the segment meta data of this segment or {@code null} if none is present. - *
-     * <p>
-     * The segment meta data is a string of the format {@code "{wid=W,sno=S,gc=G,t=T}"}
-     * where:
-     * <ul>
-     * <li>{@code W} is the writer id {@code wid},</li>
-     * <li>{@code S} is a unique, increasing sequence number corresponding to the allocation order
-     * of the segments in this store,</li>
-     * <li>{@code G} is the garbage collection generation (i.e. the number of compaction cycles
-     * that have been run),</li>
-     * <li>{@code T} is a time stamp according to {@link System#currentTimeMillis()}.</li>
-     * </ul>
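Side note for review: the meta data string whose format is specified above is actually serialised as a small JSON object by SegmentBufferWriter.newSegment() further down in this diff. A hedged sketch of pulling one field back out; the class and method names are illustrative only.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class SegmentInfoSketch {
        // Extracts the sequence number from a meta data string such as
        // {"wid":"w-123","sno":42,"gc":7,"t":1467711847893}
        static long sequenceNumber(String segmentInfo) {
            Matcher m = Pattern.compile("\"sno\":(\\d+)").matcher(segmentInfo);
            return m.find() ? Long.parseLong(m.group(1)) : -1L;
        }
    }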
- * @return the segment meta data - */ - @CheckForNull - @Deprecated - public String getSegmentInfo() { - if (getRootCount() == 0) { - return null; - } else { - return readString(getRootOffset(0)); - } - } - - SegmentId getRefId(int index) { - if (refids == null || index >= refids.length) { - String type = "data"; - if (!id.isDataSegmentId()) { - type = "bulk"; - } - long delta = System.currentTimeMillis() - id.getCreationTime(); - throw new IllegalStateException("RefId '" + index - + "' doesn't exist in " + type + " segment " + id - + ". Creation date delta is " + delta + " ms."); - } - SegmentId refid = refids[index]; - if (refid == null) { - synchronized (this) { - refid = refids[index]; - if (refid == null) { - int refpos = data.position() + index * 16; - long msb = data.getLong(refpos); - long lsb = data.getLong(refpos + 8); - refid = tracker.getSegmentId(msb, lsb); - refids[index] = refid; - } - } - } - return refid; - } - - @Deprecated - public List getReferencedIds() { - int refcount = getRefCount(); - List ids = newArrayListWithCapacity(refcount); - for (int refid = 0; refid < refcount; refid++) { - ids.add(getRefId(refid)); - } - return ids; - } - - @Deprecated - public int size() { - return data.remaining(); - } - - @Deprecated - public long getCacheSize() { - int size = 1024; - if (!data.isDirect()) { - size += size(); - } - if (id.isDataSegmentId()) { - size += size(); - } - return size; - } - - /** - * Writes this segment to the given output stream. - * - * @param stream stream to which this segment will be written - * @throws IOException on an IO error - */ - @Deprecated - public void writeTo(OutputStream stream) throws IOException { - ByteBuffer buffer = data.duplicate(); - WritableByteChannel channel = Channels.newChannel(stream); - while (buffer.hasRemaining()) { - channel.write(buffer); - } - } - - void collectBlobReferences(ReferenceCollector collector) { - int refcount = getRefCount(); - int rootcount = - data.getShort(data.position() + ROOT_COUNT_OFFSET) & 0xffff; - int blobrefcount = - data.getShort(data.position() + BLOBREF_COUNT_OFFSET) & 0xffff; - int blobrefpos = data.position() + refcount * 16 + rootcount * 3; - - for (int i = 0; i < blobrefcount; i++) { - int offset = (data.getShort(blobrefpos + i * 2) & 0xffff) << 2; - SegmentBlob blob = new SegmentBlob(new RecordId(id, offset)); - collector.addReference(blob.getBlobId(), null); - } - } - - byte readByte(int offset) { - return data.get(pos(offset, 1)); - } - - short readShort(int offset) { - return data.getShort(pos(offset, 2)); - } - - int readInt(int offset) { - return data.getInt(pos(offset, 4)); - } - - long readLong(int offset) { - return data.getLong(pos(offset, 8)); - } - - /** - * Reads the given number of bytes starting from the given position - * in this segment. 
- * - * @param position position within segment - * @param buffer target buffer - * @param offset offset within target buffer - * @param length number of bytes to read - */ - void readBytes(int position, byte[] buffer, int offset, int length) { - checkNotNull(buffer); - checkPositionIndexes(offset, offset + length, buffer.length); - ByteBuffer d = data.duplicate(); - d.position(pos(position, length)); - d.get(buffer, offset, length); - } - - RecordId readRecordId(int offset) { - int pos = pos(offset, RECORD_ID_BYTES); - return internalReadRecordId(pos); - } - - private RecordId internalReadRecordId(int pos) { - SegmentId refid = getRefId(data.get(pos) & 0xff); - int offset = ((data.get(pos + 1) & 0xff) << 8) | (data.get(pos + 2) & 0xff); - return new RecordId(refid, offset << RECORD_ALIGN_BITS); - } - - static String readString(final RecordId id) { - final SegmentId segmentId = id.getSegmentId(); - StringCache cache = segmentId.getTracker().getStringCache(); - if (cache == null) { - return segmentId.getSegment().readString(id.getOffset()); - } else { - long msb = segmentId.getMostSignificantBits(); - long lsb = segmentId.getLeastSignificantBits(); - return cache.getString(msb, lsb, id.getOffset(), new Function() { - @Nullable - @Override - public String apply(Integer offset) { - return segmentId.getSegment().loadString(offset); - } - }); - } - } - - private String readString(int offset) { - if (stringCache != null) { - long msb = id.getMostSignificantBits(); - long lsb = id.getLeastSignificantBits(); - return stringCache.getString(msb, lsb, offset, loadString); - } else { - String string = strings.get(offset); - if (string == null) { - string = loadString(offset); - strings.putIfAbsent(offset, string); // only keep the first copy - } - return string; - } - } - - private String loadString(int offset) { - int pos = pos(offset, 1); - long length = internalReadLength(pos); - if (length < SMALL_LIMIT) { - byte[] bytes = new byte[(int) length]; - ByteBuffer buffer = data.duplicate(); - buffer.position(pos + 1); - buffer.get(bytes); - return new String(bytes, Charsets.UTF_8); - } else if (length < MEDIUM_LIMIT) { - byte[] bytes = new byte[(int) length]; - ByteBuffer buffer = data.duplicate(); - buffer.position(pos + 2); - buffer.get(bytes); - return new String(bytes, Charsets.UTF_8); - } else if (length < Integer.MAX_VALUE) { - int size = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - ListRecord list = - new ListRecord(internalReadRecordId(pos + 8), size); - SegmentStream stream = new SegmentStream( - new RecordId(id, offset), list, length); - try { - return stream.getString(); - } finally { - stream.close(); - } - } else { - throw new IllegalStateException("String is too long: " + length); - } - } - - MapRecord readMap(RecordId id) { - return new MapRecord(id); - } - - Template readTemplate(final RecordId id) { - return id.getSegment().readTemplate(id.getOffset()); - } - - private Template readTemplate(int offset) { - if (templates == null) { - return loadTemplate(offset); - } - Template template = templates.get(offset); - if (template == null) { - template = loadTemplate(offset); - templates.putIfAbsent(offset, template); // only keep the first copy - } - return template; - } - - private Template loadTemplate(int offset) { - int head = readInt(offset); - boolean hasPrimaryType = (head & (1 << 31)) != 0; - boolean hasMixinTypes = (head & (1 << 30)) != 0; - boolean zeroChildNodes = (head & (1 << 29)) != 0; - boolean manyChildNodes = (head & (1 << 28)) != 0; - int mixinCount = (head >> 18) & 
((1 << 10) - 1); - int propertyCount = head & ((1 << 18) - 1); - offset += 4; - - PropertyState primaryType = null; - if (hasPrimaryType) { - RecordId primaryId = readRecordId(offset); - primaryType = PropertyStates.createProperty( - "jcr:primaryType", readString(primaryId), Type.NAME); - offset += RECORD_ID_BYTES; - } - - PropertyState mixinTypes = null; - if (hasMixinTypes) { - String[] mixins = new String[mixinCount]; - for (int i = 0; i < mixins.length; i++) { - RecordId mixinId = readRecordId(offset); - mixins[i] = readString(mixinId); - offset += RECORD_ID_BYTES; - } - mixinTypes = PropertyStates.createProperty( - "jcr:mixinTypes", Arrays.asList(mixins), Type.NAMES); - } - - String childName = Template.ZERO_CHILD_NODES; - if (manyChildNodes) { - childName = Template.MANY_CHILD_NODES; - } else if (!zeroChildNodes) { - RecordId childNameId = readRecordId(offset); - childName = readString(childNameId); - offset += RECORD_ID_BYTES; - } - - PropertyTemplate[] properties; - if (version.onOrAfter(V_11)) { - properties = readPropsV11(propertyCount, offset); - } else { - properties = readPropsV10(propertyCount, offset); - } - return new Template(primaryType, mixinTypes, properties, childName); - } - - private PropertyTemplate[] readPropsV10(int propertyCount, int offset) { - PropertyTemplate[] properties = new PropertyTemplate[propertyCount]; - for (int i = 0; i < propertyCount; i++) { - RecordId propertyNameId = readRecordId(offset); - offset += RECORD_ID_BYTES; - byte type = readByte(offset++); - properties[i] = new PropertyTemplate(i, readString(propertyNameId), - Type.fromTag(Math.abs(type), type < 0)); - } - return properties; - } - - private PropertyTemplate[] readPropsV11(int propertyCount, int offset) { - PropertyTemplate[] properties = new PropertyTemplate[propertyCount]; - if (propertyCount > 0) { - RecordId id = readRecordId(offset); - ListRecord propertyNames = new ListRecord(id, properties.length); - offset += RECORD_ID_BYTES; - for (int i = 0; i < propertyCount; i++) { - byte type = readByte(offset++); - properties[i] = new PropertyTemplate(i, - readString(propertyNames.getEntry(i)), Type.fromTag( - Math.abs(type), type < 0)); - } - } - return properties; - } - - long readLength(RecordId id) { - return id.getSegment().readLength(id.getOffset()); - } - - long readLength(int offset) { - return internalReadLength(pos(offset, 1)); - } - - private long internalReadLength(int pos) { - int length = data.get(pos++) & 0xff; - if ((length & 0x80) == 0) { - return length; - } else if ((length & 0x40) == 0) { - return ((length & 0x3f) << 8 - | data.get(pos++) & 0xff) - + SMALL_LIMIT; - } else { - return (((long) length & 0x3f) << 56 - | ((long) (data.get(pos++) & 0xff)) << 48 - | ((long) (data.get(pos++) & 0xff)) << 40 - | ((long) (data.get(pos++) & 0xff)) << 32 - | ((long) (data.get(pos++) & 0xff)) << 24 - | ((long) (data.get(pos++) & 0xff)) << 16 - | ((long) (data.get(pos++) & 0xff)) << 8 - | ((long) (data.get(pos++) & 0xff))) - + MEDIUM_LIMIT; - } - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public String toString() { - StringWriter string = new StringWriter(); - PrintWriter writer = new PrintWriter(string); - - int length = data.remaining(); - - writer.format("Segment %s (%d bytes)%n", id, length); - String segmentInfo = getSegmentInfo(); - if (segmentInfo != null) { - writer.format("Info: %s%n", segmentInfo); - } - if (id.isDataSegmentId()) { - 
writer.println("--------------------------------------------------------------------------"); - int refcount = getRefCount(); - for (int refid = 0; refid < refcount; refid++) { - writer.format("reference %02x: %s%n", refid, getRefId(refid)); - } - int rootcount = data.getShort(ROOT_COUNT_OFFSET) & 0xffff; - int pos = data.position() + refcount * 16; - for (int rootid = 0; rootid < rootcount; rootid++) { - writer.format( - "root %d: %s at %04x%n", rootid, - RecordType.values()[data.get(pos + rootid * 3) & 0xff], - data.getShort(pos + rootid * 3 + 1) & 0xffff); - } - int blobrefcount = data.getShort(BLOBREF_COUNT_OFFSET) & 0xffff; - pos += rootcount * 3; - for (int blobrefid = 0; blobrefid < blobrefcount; blobrefid++) { - int offset = data.getShort(pos + blobrefid * 2) & 0xffff; - SegmentBlob blob = new SegmentBlob( - new RecordId(id, offset << RECORD_ALIGN_BITS)); - writer.format( - "blobref %d: %s at %04x%n", blobrefid, - blob.getBlobId(), offset); - } - } - writer.println("--------------------------------------------------------------------------"); - int pos = data.limit() - ((length + 15) & ~15); - while (pos < data.limit()) { - writer.format("%04x: ", (MAX_SEGMENT_SIZE - data.limit() + pos) >> RECORD_ALIGN_BITS); - for (int i = 0; i < 16; i++) { - if (i > 0 && i % 4 == 0) { - writer.append(' '); - } - if (pos + i >= data.position()) { - byte b = data.get(pos + i); - writer.format("%02x ", b & 0xff); - } else { - writer.append(" "); - } - } - writer.append(' '); - for (int i = 0; i < 16; i++) { - if (pos + i >= data.position()) { - byte b = data.get(pos + i); - if (b >= ' ' && b < 127) { - writer.append((char) b); - } else { - writer.append('.'); - } - } else { - writer.append(' '); - } - } - writer.println(); - pos += 16; - } - writer.println("--------------------------------------------------------------------------"); - - writer.close(); - return string.toString(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java deleted file mode 100644 index ef26c7a..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlob.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Charsets.UTF_8; -import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.emptySet; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MEDIUM_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentWriter.BLOCK_SIZE; - -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Set; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.memory.AbstractBlob; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -/** - * A BLOB (stream of bytes). This is a record of type "VALUE". - */ -@Deprecated -public class SegmentBlob extends Record implements Blob { - - @Deprecated - public static Iterable getBulkSegmentIds(Blob blob) { - if (blob instanceof SegmentBlob) { - return ((SegmentBlob) blob).getBulkSegmentIds(); - } else { - return emptySet(); - } - } - - SegmentBlob(RecordId id) { - super(id); - } - - private InputStream getInlineStream( - Segment segment, int offset, int length) { - byte[] inline = new byte[length]; - segment.readBytes(offset, inline, 0, length); - return new SegmentStream(getRecordId(), inline); - } - - @Override @Nonnull - @Deprecated - public InputStream getNewStream() { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - if ((head & 0x80) == 0x00) { - // 0xxx xxxx: small value - return getInlineStream(segment, offset + 1, head); - } else if ((head & 0xc0) == 0x80) { - // 10xx xxxx: medium value - int length = (segment.readShort(offset) & 0x3fff) + SMALL_LIMIT; - return getInlineStream(segment, offset + 2, length); - } else if ((head & 0xe0) == 0xc0) { - // 110x xxxx: long value - long length = (segment.readLong(offset) & 0x1fffffffffffffffL) + MEDIUM_LIMIT; - int listSize = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - ListRecord list = new ListRecord( - segment.readRecordId(offset + 8), listSize); - return new SegmentStream(getRecordId(), list, length); - } else if ((head & 0xf0) == 0xe0) { - // 1110 xxxx: external value, short blob ID - return getNewStream(readShortBlobId(segment, offset, head)); - } else if ((head & 0xf8) == 0xf0) { - // 1111 0xxx: external value, long blob ID - return getNewStream(readLongBlobId(segment, offset)); - } else { - throw new IllegalStateException(String.format( - "Unexpected value record type: %02x", head & 0xff)); - } - } - - @Override - @Deprecated - public long length() { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - if ((head & 0x80) == 0x00) { - // 0xxx xxxx: small value - return head; - } else if ((head & 0xc0) == 0x80) { - // 10xx xxxx: medium value - return (segment.readShort(offset) & 0x3fff) + SMALL_LIMIT; - } else if ((head & 0xe0) == 0xc0) { - // 110x xxxx: long value - return (segment.readLong(offset) & 0x1fffffffffffffffL) + MEDIUM_LIMIT; - } else if ((head & 0xf0) == 0xe0) { - // 1110 xxxx: external value, short blob ID - return getLength(readShortBlobId(segment, offset, head)); - } else if ((head & 0xf8) == 0xf0) { - // 1111 0xxx: external value, long blob ID - return getLength(readLongBlobId(segment, offset)); - } else { - throw new IllegalStateException(String.format( - "Unexpected value record type: %02x", 
head & 0xff)); - } - } - - @Override - @CheckForNull - @Deprecated - public String getReference() { - String blobId = getBlobId(); - if (blobId != null) { - BlobStore blobStore = getSegment().getSegmentId().getTracker(). - getStore().getBlobStore(); - if (blobStore != null) { - return blobStore.getReference(blobId); - } else { - throw new IllegalStateException("Attempt to read external blob with blobId [" + blobId + "] " + - "without specifying BlobStore"); - } - } - return null; - } - - - @Override - @Deprecated - public String getContentIdentity() { - String blobId = getBlobId(); - if (blobId != null){ - return blobId; - } - return null; - } - - @Deprecated - public boolean isExternal() { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - // 1110 xxxx or 1111 0xxx: external value - return (head & 0xf0) == 0xe0 || (head & 0xf8) == 0xf0; - } - - @Deprecated - public String getBlobId() { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - if ((head & 0xf0) == 0xe0) { - // 1110 xxxx: external value, small blob ID - return readShortBlobId(segment, offset, head); - } else if ((head & 0xf8) == 0xf0) { - // 1111 0xxx: external value, long blob ID - return readLongBlobId(segment, offset); - } else { - return null; - } - } - - @Deprecated - public SegmentBlob clone(SegmentWriter writer, boolean cloneLargeBinaries) throws IOException { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - if ((head & 0x80) == 0x00) { - // 0xxx xxxx: small value - return writer.writeStream(new BufferedInputStream(getNewStream())); - } else if ((head & 0xc0) == 0x80) { - // 10xx xxxx: medium value - return writer.writeStream(new BufferedInputStream(getNewStream())); - } else if ((head & 0xe0) == 0xc0) { - // 110x xxxx: long value - if (cloneLargeBinaries) { - return writer.writeStream(new BufferedInputStream( - getNewStream())); - } else { - // this was the previous (default) behavior - long length = (segment.readLong(offset) & 0x1fffffffffffffffL) - + MEDIUM_LIMIT; - int listSize = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - ListRecord list = new ListRecord( - segment.readRecordId(offset + 8), listSize); - return writer.writeLargeBlob(length, list.getEntries()); - } - } else if ((head & 0xf0) == 0xe0) { - // 1110 xxxx: external value, short blob ID - return writer.writeExternalBlob(getBlobId()); - } else if ((head & 0xf8) == 0xf0) { - // 1111 0xxx: external value, long blob ID - return writer.writeExternalBlob(getBlobId()); - } else { - throw new IllegalStateException(String.format( - "Unexpected value record type: %02x", head & 0xff)); - } - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public boolean equals(Object object) { - if (object == this || fastEquals(this, object)) { - return true; - } else if (object instanceof SegmentBlob) { - SegmentBlob that = (SegmentBlob) object; - if (this.wasCompactedTo(that) || that.wasCompactedTo(this)) { - return true; - } - } - return object instanceof Blob - && AbstractBlob.equal(this, (Blob) object); - } - - @Override - @Deprecated - public int hashCode() { - return 0; - } - - //-----------------------------------------------------------< private >-- - - private static String readShortBlobId(Segment segment, int offset, byte head) { - int length = (head & 0x0f) << 8 | (segment.readByte(offset + 1) & 0xff); - byte[] bytes = new byte[length]; - 
segment.readBytes(offset + 2, bytes, 0, length); - return new String(bytes, UTF_8); - } - - private static String readLongBlobId(Segment segment, int offset) { - RecordId blobIdRecordId = segment.readRecordId(offset + 1); - return Segment.readString(blobIdRecordId); - } - - private Iterable getBulkSegmentIds() { - Segment segment = getSegment(); - int offset = getOffset(); - byte head = segment.readByte(offset); - if ((head & 0xe0) == 0xc0) { - // 110x xxxx: long value - long length = (segment.readLong(offset) & 0x1fffffffffffffffL) + MEDIUM_LIMIT; - int listSize = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - ListRecord list = new ListRecord( - segment.readRecordId(offset + 8), listSize); - Set ids = newHashSet(); - for (RecordId id : list.getEntries()) { - ids.add(id.getSegmentId()); - } - return ids; - } else { - return emptySet(); - } - } - - private Blob getBlob(String blobId) { - return getSegment().getSegmentId().getTracker().getStore().readBlob(blobId); - } - - private InputStream getNewStream(String blobId) { - return getBlob(blobId).getNewStream(); - } - - private long getLength(String blobId) { - long length = getBlob(blobId).length(); - - if (length == -1) { - throw new IllegalStateException(String.format("Unknown length of external binary: %s", blobId)); - } - - return length; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlobReferenceRetriever.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlobReferenceRetriever.java deleted file mode 100644 index f2e92d6..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBlobReferenceRetriever.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; - -/** - * Implementation of {@link BlobReferenceRetriever} to retrieve blob references from the - * {@link SegmentTracker}. 
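Side note for review: SegmentBlob, removed above, dispatches on the first byte of a value record in several places (getNewStream, length, clone). The bit patterns from its comments can be summarised in one helper; this is a sketch with an illustrative name, and the size hints assume SMALL_LIMIT = 128, MEDIUM_LIMIT of roughly 16kB and BLOCK_SIZE = 4kB from the deleted sources.

    class ValueRecordKindSketch {
        // Mirrors the head-byte comments in the deleted SegmentBlob.
        static String kind(byte head) {
            if ((head & 0x80) == 0x00) return "small inline value (< 128 bytes)";
            if ((head & 0xc0) == 0x80) return "medium inline value (< ~16kB)";
            if ((head & 0xe0) == 0xc0) return "long value (list of 4kB blocks)";
            if ((head & 0xf0) == 0xe0) return "external value, short blob ID";
            if ((head & 0xf8) == 0xf0) return "external value, long blob ID";
            throw new IllegalStateException(
                    String.format("Unexpected value record type: %02x", head & 0xff));
        }
    }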
- */ -@Deprecated -public class SegmentBlobReferenceRetriever implements BlobReferenceRetriever { - - private final SegmentTracker tracker; - - @Deprecated - public SegmentBlobReferenceRetriever(SegmentTracker tracker) { - this.tracker = tracker; - } - - @Override - @Deprecated - public void collectReferences(final ReferenceCollector collector) { - tracker.collectBlobReferences(collector); - } -} - diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBufferWriter.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBufferWriter.java deleted file mode 100644 index 8ff4750..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentBufferWriter.java +++ /dev/null @@ -1,396 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Charsets.UTF_8; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newLinkedHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.System.arraycopy; -import static java.lang.System.currentTimeMillis; -import static java.lang.System.identityHashCode; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newValueWriter; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ID_BYTES; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SEGMENT_REFERENCE_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.align; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class encapsulates the state of a segment being written. It provides methods - * for writing primitive data types and for pre-allocating buffer space in the current - * segment. Should the current segment not have enough space left the current segment - * is flushed and a fresh one is allocated. - *
-     * <p>
-     * The common usage pattern is:
-     * <pre>
-     *    SegmentBufferWriter writer = ...
-     *    writer.prepare(...)  // allocate buffer
-     *    writer.writeXYZ(...)
-     * </pre>
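Side note for review: the usage pattern above, spelled out with the actual signatures from this class. A sketch only: SegmentBufferWriter, SegmentStore, RecordType and SegmentVersion come from the deleted package, and "w-example" is an arbitrary writer id, not something this patch introduces.

    // Sketch: write one int as a VALUE root record via the pattern above.
    static RecordId writeSingleInt(SegmentStore store) throws IOException {
        SegmentBufferWriter writer =
                new SegmentBufferWriter(store, SegmentVersion.V_11, "w-example");
        RecordId id = writer.prepare(RecordType.VALUE, 4,
                java.util.Collections.<RecordId>emptyList());
        writer.writeInt(42);  // fills exactly the 4 bytes reserved by prepare()
        writer.flush();       // pushes the finished segment to the store
        return id;
    }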
- * The behaviour of this class is undefined should the pre-allocated buffer be - * overrun be calling any of the write methods. - */ -class SegmentBufferWriter { - private static final Logger LOG = LoggerFactory.getLogger(SegmentBufferWriter.class); - - /** - * The set of root records (i.e. ones not referenced by other records) - * in this segment. - */ - private final Map roots = newLinkedHashMap(); - - /** - * Identifiers of the external blob references stored in this segment. - */ - private final List blobrefs = newArrayList(); - - private final SegmentStore store; - - /** - * Version of the segment storage format. - */ - private final SegmentVersion version; - - /** - * Id of this writer. - */ - private final String wid; - - private final SegmentTracker tracker; - - /** - * The segment write buffer, filled from the end to the beginning - * (see OAK-629). - */ - private byte[] buffer; - - private Segment segment; - - /** - * The number of bytes already written (or allocated). Counted from - * the end of the buffer. - */ - private int length; - - /** - * Current write position within the buffer. Grows up when raw data - * is written, but shifted downwards by the prepare methods. - */ - private int position; - - public SegmentBufferWriter(SegmentStore store, SegmentVersion version, String wid) throws IOException { - this.store = store; - this.version = version; - this.wid = (wid == null - ? "w-" + identityHashCode(this) - : wid); - - this.tracker = store.getTracker(); - this.buffer = createNewBuffer(version); - newSegment(this.wid); - } - - /** - * Allocate a new segment and write the segment meta data. - * The segment meta data is a string of the format {@code "{wid=W,sno=S,gc=G,t=T}"} - * where: - *
-     * <ul>
-     * <li>{@code W} is the writer id {@code wid},</li>
-     * <li>{@code S} is a unique, increasing sequence number corresponding to the allocation order
-     * of the segments in this store,</li>
-     * <li>{@code G} is the garbage collection generation (i.e. the number of compaction cycles
-     * that have been run),</li>
-     * <li>{@code T} is a time stamp according to {@link System#currentTimeMillis()}.</li>
-     * </ul>
- * The segment meta data is guaranteed to be the first string record in a segment. - * @param wid the writer id - */ - private void newSegment(String wid) throws IOException { - this.segment = new Segment(tracker, buffer); - String metaInfo = "{\"wid\":\"" + wid + '"' + - ",\"sno\":" + tracker.getNextSegmentNo() + - ",\"gc\":" + tracker.getCompactionMap().getGeneration() + - ",\"t\":" + currentTimeMillis() + "}"; - - byte[] data = metaInfo.getBytes(UTF_8); - newValueWriter(data.length, data).write(this); - } - - static byte[] createNewBuffer(SegmentVersion v) { - byte[] buffer = new byte[Segment.MAX_SEGMENT_SIZE]; - buffer[0] = '0'; - buffer[1] = 'a'; - buffer[2] = 'K'; - buffer[3] = SegmentVersion.asByte(v); - buffer[4] = 0; // reserved - buffer[5] = 0; // refcount - return buffer; - } - - public void writeByte(byte value) { - buffer[position++] = value; - } - - public void writeShort(short value) { - buffer[position++] = (byte) (value >> 8); - buffer[position++] = (byte) value; - } - - public void writeInt(int value) { - buffer[position++] = (byte) (value >> 24); - buffer[position++] = (byte) (value >> 16); - buffer[position++] = (byte) (value >> 8); - buffer[position++] = (byte) value; - } - - public void writeLong(long value) { - writeInt((int) (value >> 32)); - writeInt((int) value); - } - - /** - * Write a record id, and marks the record id as referenced (removes it from - * the unreferenced set). - * - * @param listId the record id - */ - public void writeRecordId(RecordId listId) { - checkNotNull(listId); - roots.remove(listId); - - int offset = listId.getOffset(); - checkState(0 <= offset && offset < MAX_SEGMENT_SIZE); - checkState(offset == align(offset, 1 << Segment.RECORD_ALIGN_BITS)); - - buffer[position++] = (byte) getSegmentRef(listId.getSegmentId()); - buffer[position++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS)); - buffer[position++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS); - } - - private int getSegmentRef(SegmentId segmentId) { - int refCount = segment.getRefCount(); - if (refCount > SEGMENT_REFERENCE_LIMIT) { - throw new SegmentOverflowException( - "Segment cannot have more than 255 references " + segment.getSegmentId()); - } - for (int index = 0; index < refCount; index++) { - if (segmentId.equals(segment.getRefId(index))) { - return index; - } - } - - ByteBuffer.wrap(buffer, refCount * 16, 16) - .putLong(segmentId.getMostSignificantBits()) - .putLong(segmentId.getLeastSignificantBits()); - buffer[Segment.REF_COUNT_OFFSET] = (byte) refCount; - return refCount; - } - - public void writeBytes(byte[] data, int offset, int length) { - arraycopy(data, offset, buffer, position, length); - position += length; - } - - public void addBlobRef(RecordId blobId) { - blobrefs.add(blobId); - } - - /** - * Adds a segment header to the buffer and writes a segment to the segment - * store. This is done automatically (called from prepare) when there is not - * enough space for a record. It can also be called explicitly. 
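Side note for review: createNewBuffer() above, together with the *_OFFSET constants in the deleted Segment.java, fixes the segment header layout. The comment below restates that layout for reference; the helper class and method are illustrative, not part of the patch.

    class SegmentHeaderSketch {
        // bytes 0-2: magic "0aK"; byte 3: storage format version;
        // byte 4: reserved; byte 5: reference count - 1 (REF_COUNT_OFFSET);
        // bytes 6-7: root record count (ROOT_COUNT_OFFSET, big-endian short);
        // bytes 8-9: blob reference count (BLOBREF_COUNT_OFFSET, big-endian short).
        static boolean hasSegmentMagic(byte[] buffer) {
            return buffer.length > 3
                    && buffer[0] == '0' && buffer[1] == 'a' && buffer[2] == 'K';
        }
    }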
- */ - public void flush() throws IOException { - if (length > 0) { - int refcount = segment.getRefCount(); - - int rootcount = roots.size(); - buffer[Segment.ROOT_COUNT_OFFSET] = (byte) (rootcount >> 8); - buffer[Segment.ROOT_COUNT_OFFSET + 1] = (byte) rootcount; - - int blobrefcount = blobrefs.size(); - buffer[Segment.BLOBREF_COUNT_OFFSET] = (byte) (blobrefcount >> 8); - buffer[Segment.BLOBREF_COUNT_OFFSET + 1] = (byte) blobrefcount; - - length = align( - refcount * 16 + rootcount * 3 + blobrefcount * 2 + length, - 16); - - checkState(length <= buffer.length); - - int pos = refcount * 16; - if (pos + length <= buffer.length) { - // the whole segment fits to the space *after* the referenced - // segment identifiers we've already written, so we can safely - // copy those bits ahead even if concurrent code is still - // reading from that part of the buffer - arraycopy(buffer, 0, buffer, buffer.length - length, pos); - pos += buffer.length - length; - } else { - // this might leave some empty space between the header and - // the record data, but this case only occurs when the - // segment is >252kB in size and the maximum overhead is <<4kB, - // which is acceptable - length = buffer.length; - } - - for (Map.Entry entry : roots.entrySet()) { - int offset = entry.getKey().getOffset(); - buffer[pos++] = (byte) entry.getValue().ordinal(); - buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS)); - buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS); - } - - for (RecordId blobref : blobrefs) { - int offset = blobref.getOffset(); - buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS)); - buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS); - } - - SegmentId segmentId = segment.getSegmentId(); - int segmentOffset = buffer.length - length; - - LOG.debug("Writing data segment {} ({} bytes)", segmentId, length); - store.writeSegment(segmentId, buffer, segmentOffset, length); - - // Keep this segment in memory as it's likely to be accessed soon - ByteBuffer data; - if (segmentOffset > 4096) { - data = ByteBuffer.allocate(length); - data.put(buffer, segmentOffset, length); - data.rewind(); - } else { - data = ByteBuffer.wrap(buffer, segmentOffset, length); - } - - // It is important to put the segment into the cache only *after* it has been - // written to the store since as soon as it is in the cache it becomes eligible - // for eviction, which might lead to SNFEs when it is not yet in the store at that point. - tracker.setSegment(segmentId, new Segment(tracker, segmentId, data)); - - buffer = createNewBuffer(version); - roots.clear(); - blobrefs.clear(); - length = 0; - position = buffer.length; - newSegment(wid); - } - } - - /** - * Before writing a record (which are written backwards, from the end of the - * file to the beginning), this method is called, to ensure there is enough - * space. A new segment is also created if there is not enough space in the - * segment lookup table or elsewhere. - *
-     * <p>
- * This method does not actually write into the segment, just allocates the - * space (flushing the segment if needed and starting a new one), and sets - * the write position (records are written from the end to the beginning, - * but within a record from left to right). - * - * @param type the record type (only used for root records) - * @param size the size of the record, excluding the size used for the - * record ids - * @param ids the record ids - * @return a new record id - */ - public RecordId prepare(RecordType type, int size, Collection ids) throws IOException { - checkArgument(size >= 0); - checkNotNull(ids); - - int idCount = ids.size(); - int recordSize = align(size + idCount * RECORD_ID_BYTES, 1 << Segment.RECORD_ALIGN_BITS); - - // First compute the header and segment sizes based on the assumption - // that *all* identifiers stored in this record point to previously - // unreferenced segments. - int refCount = segment.getRefCount() + idCount; - int blobRefCount = blobrefs.size() + 1; - int rootCount = roots.size() + 1; - int headerSize = refCount * 16 + rootCount * 3 + blobRefCount * 2; - int segmentSize = align(headerSize + recordSize + length, 16); - - // If the size estimate looks too big, recompute it with a more - // accurate refCount value. We skip doing this when possible to - // avoid the somewhat expensive list and set traversals. - if (segmentSize > buffer.length - 1 - || refCount > Segment.SEGMENT_REFERENCE_LIMIT) { - refCount -= idCount; - - Set segmentIds = newHashSet(); - - // The set of old record ids in this segment - // that were previously root record ids, but will no longer be, - // because the record to be written references them. - // This needs to be a set, because the list of ids can - // potentially reference the same record multiple times - Set notRoots = new HashSet(); - for (RecordId recordId : ids) { - SegmentId segmentId = recordId.getSegmentId(); - if (!(segmentId.equals(segment.getSegmentId()))) { - segmentIds.add(segmentId); - } else if (roots.containsKey(recordId)) { - notRoots.add(recordId); - } - } - rootCount -= notRoots.size(); - - if (!segmentIds.isEmpty()) { - for (int refid = 1; refid < refCount; refid++) { - segmentIds.remove(segment.getRefId(refid)); - } - refCount += segmentIds.size(); - } - - headerSize = refCount * 16 + rootCount * 3 + blobRefCount * 2; - segmentSize = align(headerSize + recordSize + length, 16); - } - - if (segmentSize > buffer.length - 1 - || blobRefCount > 0xffff - || rootCount > 0xffff - || refCount > Segment.SEGMENT_REFERENCE_LIMIT) { - flush(); - } - - length += recordSize; - position = buffer.length - length; - checkState(position >= 0); - - RecordId id = new RecordId(segment.getSegmentId(), position); - roots.put(id, type); - return id; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCheckpointMBean.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCheckpointMBean.java deleted file mode 100644 index dbd8e10..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCheckpointMBean.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import java.util.Date; - -import javax.management.openmbean.OpenDataException; -import javax.management.openmbean.TabularDataSupport; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.util.AbstractCheckpointMBean; - -/** - * {@code CheckpointMBean} implementation for the {@code SegmentNodeStore}. - */ -@Deprecated -public class SegmentCheckpointMBean extends AbstractCheckpointMBean { - private final SegmentNodeStore store; - - @Deprecated - public SegmentCheckpointMBean(SegmentNodeStore store) { - this.store = store; - } - - @Override - protected void collectCheckpoints(TabularDataSupport tab) throws OpenDataException { - for (ChildNodeEntry cne : store.getCheckpoints().getChildNodeEntries()) { - String id = cne.getName(); - NodeState checkpoint = cne.getNodeState(); - String created = getDate(checkpoint, "created"); - String expires = getDate(checkpoint, "timestamp"); - tab.put(id, toCompositeData(id, created, expires, store.checkpointInfo(id))); - } - } - - private static String getDate(NodeState checkpoint, String name) { - PropertyState p = checkpoint.getProperty(name); - if (p == null) { - return "NA"; - } - - return new Date(p.getValue(Type.LONG)).toString(); - } - - @Override - @Deprecated - public String createCheckpoint(long lifetime) { - return store.checkpoint(lifetime); - } - - @Override - @Deprecated - public boolean releaseCheckpoint(String checkpoint) { - return store.release(checkpoint); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteDescriptors.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteDescriptors.java deleted file mode 100644 index a4a4086..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteDescriptors.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import javax.jcr.Value; - -import org.apache.jackrabbit.commons.SimpleValueFactory; -import org.apache.jackrabbit.oak.api.Descriptors; -import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -/** - * This provides the 'clusterView' repository descriptors - **/ -class SegmentDiscoveryLiteDescriptors implements Descriptors { - - /** - * Name of the repository descriptor via which the clusterView is published - which is the raison d'etre of the - * DocumentDiscoveryLiteService TODO: move this constant to a generic place for both segment and document - **/ - private static final String OAK_DISCOVERYLITE_CLUSTERVIEW = "oak.discoverylite.clusterview"; - - private final SimpleValueFactory factory = new SimpleValueFactory(); - - private final NodeStore store; - - SegmentDiscoveryLiteDescriptors(NodeStore store) { - this.store = store; - } - - @Override - public String[] getKeys() { - return new String[] {OAK_DISCOVERYLITE_CLUSTERVIEW}; - } - - @Override - public boolean isStandardDescriptor(String key) { - return OAK_DISCOVERYLITE_CLUSTERVIEW.equals(key); - } - - @Override - public boolean isSingleValueDescriptor(String key) { - return OAK_DISCOVERYLITE_CLUSTERVIEW.equals(key); - } - - @Override - public Value getValue(String key) { - if (!OAK_DISCOVERYLITE_CLUSTERVIEW.equals(key)) { - return null; - } - return factory.createValue(getClusterViewAsDescriptorValue()); - } - - @Override - public Value[] getValues(String key) { - if (!OAK_DISCOVERYLITE_CLUSTERVIEW.equals(key)) { - return null; - } - return new Value[] {getValue(key)}; - } - - private String getClusterViewAsDescriptorValue() { - // since currently segment node store is not running in a cluster - // we can hard-code a single-vm descriptor here: - // {"seq":4,"final":true,"me":1,"active":[1],"deactivating":[],"inactive":[2]} - // OAK-3672 : 'id' is now allowed to be null (supported by upper layers), - // and for tarMk we're doing exactly that (id==null) - indicating - // to upper layers that we're not really in a cluster and that - // this low level descriptor doesn't manage the 'cluster id' - // in such a case. - // OAK-4006: but ClusterRepositoryInfo now provides a persistent clusterId, - // so that is now used also for discovery-lite via exactly below 'id' - String clusterId = ClusterRepositoryInfo.getOrCreateId(store); - return "{\"seq\":1,\"final\":true,\"me\":1,\"id\":\"" + clusterId + "\",\"active\":[1],\"deactivating\":[],\"inactive\":[]}"; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraph.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraph.java deleted file mode 100644 index 53d6266..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraph.java +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Throwables.getStackTraceAsString; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.String.valueOf; -import static java.util.Collections.singletonMap; -import static java.util.regex.Pattern.compile; -import static org.apache.jackrabbit.oak.commons.IOUtils.closeQuietly; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentId.isDataSegmentId; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.OutputStream; -import java.io.PrintWriter; -import java.io.StringReader; -import java.util.Date; -import java.util.HashSet; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; -import java.util.regex.Pattern; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import com.google.common.base.Function; -import com.google.common.base.Functions; -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import com.google.common.collect.HashMultiset; -import com.google.common.collect.Multiset; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.commons.json.JsonObject; -import org.apache.jackrabbit.oak.commons.json.JsopTokenizer; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; - -/** - * Utility graph for parsing a segment graph. - */ -@Deprecated -public final class SegmentGraph { - private SegmentGraph() { } - - /** - * Visitor for receiving call backs while traversing the - * segment graph. - */ - @Deprecated - public interface SegmentGraphVisitor { - - /** - * A call to this method indicates that the {@code from} segment - * references the {@code to} segment. Or if {@code to} is {@code null} - * that the {@code from} has no references. - * - * @param from - * @param to - */ - void accept(@Nonnull UUID from, @CheckForNull UUID to); - } - - /** - * A simple graph representation for a graph with node of type {@code T}. 
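The Graph class being removed here is essentially a vertex set plus an adjacency map of Guava multisets, so parallel edges are counted rather than deduplicated. A freestanding sketch of that shape, useful for following the traversal and writer code below:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class SimpleGraph<T> {

    private final Set<T> vertices = new HashSet<T>();

    // from -> multiset of targets: adding the same edge twice raises its count
    private final Map<T, Multiset<T>> edges = new HashMap<T, Multiset<T>>();

    public void addVertex(T vertex) {
        vertices.add(vertex);
    }

    public void addEdge(T from, T to) {
        Multiset<T> tos = edges.get(from);
        if (tos == null) {
            tos = HashMultiset.create();
            edges.put(from, tos);
        }
        tos.add(to);
    }

    public boolean containsVertex(T vertex) {
        return vertices.contains(vertex);
    }

    public Multiset<T> getEdge(T from) {
        return edges.get(from); // null when 'from' has no outgoing edges
    }
}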
- */ - @Deprecated - public static class Graph { - /** The vertices of this graph */ - private final Set vertices = newHashSet(); - - /** The edges of this graph */ - private final Map> edges = newHashMap(); - - private void addVertex(T vertex) { - vertices.add(vertex); - } - - private void addEdge(T from, T to) { - Multiset tos = edges.get(from); - if (tos == null) { - tos = HashMultiset.create(); - edges.put(from, tos); - } - tos.add(to); - } - - /** - * @return the vertices of this graph - */ - @Deprecated - public Iterable vertices() { - return vertices; - } - - /** - * @param vertex - * @return {@code true} iff this graph contains {@code vertex} - */ - @Deprecated - public boolean containsVertex(T vertex) { - return vertices.contains(vertex); - } - - /** - * @return the edges of this graph - */ - @Deprecated - public Set>> edges() { - return edges.entrySet(); - } - - /** - * @param from - * @return the edges from {@code from} or {@code null} if none. - */ - @Deprecated - public Multiset getEdge(T from) { - return edges.get(from); - } - } - - /** - * Write the segment graph of a file store to a stream. - *
<p>
- * The graph is written in - * the Guess GDF format, - * which is easily imported into Gephi. - * As GDF only supports integers but the segment time stamps are encoded as long - * the {@code epoch} argument is used as a negative offset translating all timestamps - * into a valid int range. - * - * @param fileStore file store to graph - * @param out stream to write the graph to - * @param epoch epoch (in milliseconds) - * @param pattern regular expression specifying inclusion of nodes or {@code null} - * for all nodes. - * @throws Exception - */ - @Deprecated - public static void writeSegmentGraph( - @Nonnull ReadOnlyStore fileStore, - @Nonnull OutputStream out, - @Nonnull Date epoch, - @CheckForNull String pattern) throws Exception { - checkNotNull(epoch); - PrintWriter writer = new PrintWriter(checkNotNull(out)); - try { - SegmentNodeState root = checkNotNull(fileStore).getHead(); - - Predicate filter = pattern == null - ? Predicates.alwaysTrue() - : createRegExpFilter(pattern, fileStore.getTracker()); - Graph segmentGraph = parseSegmentGraph(fileStore, filter); - Graph headGraph = parseHeadGraph(root.getRecordId()); - - writer.write("nodedef>name VARCHAR, label VARCHAR, type VARCHAR, wid VARCHAR, gc INT, t INT, size INT, head BOOLEAN\n"); - for (UUID segment : segmentGraph.vertices()) { - writeNode(segment, writer, headGraph.containsVertex(segment), epoch, fileStore.getTracker()); - } - - writer.write("edgedef>node1 VARCHAR, node2 VARCHAR, head BOOLEAN\n"); - for (Entry> edge : segmentGraph.edges()) { - UUID from = edge.getKey(); - for (UUID to : edge.getValue()) { - if (!from.equals(to)) { - Multiset he = headGraph.getEdge(from); - boolean inHead = he != null && he.contains(to); - writer.write(from + "," + to + "," + inHead + "\n"); - } - } - } - } finally { - writer.close(); - } - } - - /** - * Create a regular expression based inclusion filter for segment. - * - * @param pattern regular expression specifying inclusion of nodes. - * @param tracker the segment tracker of the store acting upon. - * @return - */ - @Deprecated - public static Predicate createRegExpFilter( - @Nonnull String pattern, - @Nonnull final SegmentTracker tracker) { - final Pattern regExp = compile(checkNotNull(pattern)); - checkNotNull(tracker); - - return new Predicate() { - @Override - public boolean apply(UUID segment) { - try { - String info = getSegmentInfo(segment, tracker); - if (info == null) { - info = "NULL"; - } - return regExp.matcher(info).matches(); - } catch (Exception e) { - System.err.println("Error accessing segment " + segment + ": " + e); - return false; - } - } - }; - } - - /** - * Parse the segment graph of a file store. - * - * @param fileStore file store to parse - * @param filter inclusion criteria for vertices and edges. An edge is only included if - * both its source and target vertex are included. - * @return the segment graph rooted as the segment containing the head node - * state of {@code fileStore}. - * @throws IOException - */ - @Nonnull - @Deprecated - public static Graph parseSegmentGraph( - @Nonnull ReadOnlyStore fileStore, - @Nonnull Predicate filter) throws IOException { - SegmentNodeState root = checkNotNull(fileStore).getHead(); - HashSet roots = newHashSet(root.getRecordId().asUUID()); - return parseSegmentGraph(fileStore, roots, filter, Functions.identity()); - } - - /** - * Write the gc generation graph of a file store to a stream. - *
<p>
- * The graph is written in - * the Guess GDF format, - * which is easily imported into Gephi. - * - * @param fileStore file store to graph - * @param out stream to write the graph to - * @throws Exception - */ - @Deprecated - public static void writeGCGraph(@Nonnull ReadOnlyStore fileStore, @Nonnull OutputStream out) - throws Exception { - PrintWriter writer = new PrintWriter(checkNotNull(out)); - try { - Graph gcGraph = parseGCGraph(checkNotNull(fileStore)); - - writer.write("nodedef>name VARCHAR\n"); - for (String gen : gcGraph.vertices()) { - writer.write(gen + "\n"); - } - - writer.write("edgedef>node1 VARCHAR, node2 VARCHAR, weight INT\n"); - for (Entry> edge : gcGraph.edges()) { - String from = edge.getKey(); - Multiset tos = edge.getValue(); - for (String to : tos.elementSet()) { - if (!from.equals(to) && !to.isEmpty()) { - writer.write(from + "," + to + "," + tos.count(to) + "\n"); - } - } - } - } finally { - writer.close(); - } - } - - /** - * Parse the gc generation graph of a file store. - * - * @param fileStore file store to parse - * @return the gc generation graph rooted ad the segment containing the head node - * state of {@code fileStore}. - * @throws IOException - */ - @Nonnull - @Deprecated - public static Graph parseGCGraph(@Nonnull final ReadOnlyStore fileStore) - throws IOException { - SegmentNodeState root = checkNotNull(fileStore).getHead(); - HashSet roots = newHashSet(root.getRecordId().asUUID()); - return parseSegmentGraph(fileStore, roots, Predicates.alwaysTrue(), new Function() { - @Override @Nullable - public String apply(UUID segmentId) { - Map info = getSegmentInfoMap(segmentId, fileStore.getTracker()); - if (info != null) { - String error = info.get("error"); - if (error != null) { - return "Error"; - } else { - return info.get("gc"); - } - } else if (!isDataSegmentId(segmentId.getLeastSignificantBits())) { - return "bulk"; - } else { - return "null"; - } - } - }); - } - - /** - * Parse the segment graph of a file store starting with a given set of root segments. - * The full segment graph is mapped through the passed {@code map} to the - * graph returned by this function. - * - * @param fileStore file store to parse - * @param roots the initial set of segments - * @param map map defining an homomorphism from the segment graph into the returned graph - * @param filter inclusion criteria for vertices and edges. An edge is only included if - * both its source and target vertex are included. - * @return the segment graph of {@code fileStore} rooted at {@code roots} and mapped - * by {@code map} - * @throws IOException - */ - @Nonnull - @Deprecated - public static Graph parseSegmentGraph( - @Nonnull final ReadOnlyStore fileStore, - @Nonnull Set roots, - @Nonnull final Predicate filter, - @Nonnull final Function map) throws IOException { - final Graph graph = new Graph(); - - checkNotNull(filter); - checkNotNull(map); - checkNotNull(fileStore).traverseSegmentGraph(checkNotNull(roots), - new SegmentGraphVisitor() { - @Override - public void accept(@Nonnull UUID from, @CheckForNull UUID to) { - T fromT = null; - T toT = null; - if (filter.apply(from)) { - fromT = map.apply(from); - graph.addVertex(fromT); - } - if (to != null && filter.apply(to)) { - toT = map.apply(to); - graph.addVertex(toT); - } - if (fromT != null && toT != null) { - graph.addEdge(fromT, toT); - } - } - }); - return graph; - } - - /** - * Parser the head graph. The head graph is the sub graph of the segment - * graph containing the {@code root}. 
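Both writers above serialize to the same two-section GDF layout: a nodedef> header followed by one vertex per line, then an edgedef> header followed by from,to,weight rows, where the weight of an edge is its multiplicity in the multiset. A compact standalone sketch of that serialization (the sample generation names are invented):

import java.io.PrintWriter;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class GdfWriterExample {

    // Same two-section layout as the deleted writeGCGraph: nodes first,
    // then weighted edges, where a weight is the edge's multiplicity.
    static void writeGdf(Iterable<String> vertices,
                         Map<String, Multiset<String>> edges,
                         PrintWriter writer) {
        writer.write("nodedef>name VARCHAR\n");
        for (String v : vertices) {
            writer.write(v + "\n");
        }
        writer.write("edgedef>node1 VARCHAR, node2 VARCHAR, weight INT\n");
        for (Map.Entry<String, Multiset<String>> e : edges.entrySet()) {
            String from = e.getKey();
            for (String to : e.getValue().elementSet()) {
                if (!from.equals(to)) { // self-loops skipped, as in the original
                    writer.write(from + "," + to + "," + e.getValue().count(to) + "\n");
                }
            }
        }
        writer.flush();
    }

    public static void main(String[] args) {
        Multiset<String> targets = HashMultiset.create();
        targets.add("gen1");
        targets.add("gen1"); // duplicate edge -> weight 2
        Map<String, Multiset<String>> edges = new HashMap<String, Multiset<String>>();
        edges.put("gen0", targets);
        writeGdf(Arrays.asList("gen0", "gen1"), edges, new PrintWriter(System.out));
    }
}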
- * @param root - * @return the head graph of {@code root}. - */ - @Nonnull - @Deprecated - public static Graph parseHeadGraph(@Nonnull RecordId root) { - final Graph graph = new Graph(); - - try { - new SegmentParser() { - private void addEdge(RecordId from, RecordId to) { - graph.addVertex(from.asUUID()); - graph.addVertex(to.asUUID()); - graph.addEdge(from.asUUID(), to.asUUID()); - } - - @Override - protected void onNode(RecordId parentId, RecordId nodeId) { - super.onNode(parentId, nodeId); - addEdge(parentId, nodeId); - } - - @Override - protected void onTemplate(RecordId parentId, RecordId templateId) { - super.onTemplate(parentId, templateId); - addEdge(parentId, templateId); - } - - @Override - protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMap(parentId, mapId, map); - addEdge(parentId, mapId); - } - - @Override - protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapDiff(parentId, mapId, map); - addEdge(parentId, mapId); - } - - @Override - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapLeaf(parentId, mapId, map); - addEdge(parentId, mapId); - } - - @Override - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapBranch(parentId, mapId, map); - addEdge(parentId, mapId); - } - - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - super.onProperty(parentId, propertyId, template); - addEdge(parentId, propertyId); - } - - @Override - protected void onValue(RecordId parentId, RecordId valueId, Type type) { - super.onValue(parentId, valueId, type); - addEdge(parentId, valueId); - } - - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - super.onBlob(parentId, blobId); - addEdge(parentId, blobId); - } - - @Override - protected void onString(RecordId parentId, RecordId stringId) { - super.onString(parentId, stringId); - addEdge(parentId, stringId); - } - - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - super.onList(parentId, listId, count); - addEdge(parentId, listId); - } - - @Override - protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) { - super.onListBucket(parentId, listId, index, count, capacity); - addEdge(parentId, listId); - } - }.parseNode(checkNotNull(root)); - } catch (SegmentNotFoundException e) { - System.err.println("Error head graph parsing: " + e); - } - return graph; - } - - private static void writeNode(UUID node, PrintWriter writer, boolean inHead, Date epoch, SegmentTracker tracker) { - Map sInfo = getSegmentInfoMap(node, tracker); - if (!sInfo.containsKey("t")) { - writer.write(node + ",b,bulk,b,-1,-1," + inHead + "\n"); - } else { - String error = sInfo.get("error"); - if (error != null) { - writer.write(node + - "," + firstLine(error) + - ",error,e,-1,-1," + inHead + "\n"); - } else { - long t = asLong(sInfo.get("t")); - long ts = t - epoch.getTime(); - checkArgument(ts >= Integer.MIN_VALUE && ts <= Integer.MAX_VALUE, - "Time stamp (" + new Date(t) + ") not in epoch (" + - new Date(epoch.getTime() + Integer.MIN_VALUE) + " - " + - new Date(epoch.getTime() + Integer.MAX_VALUE) + ")"); - writer.write(node + - "," + sInfo.get("sno") + - ",data" + - "," + sInfo.get("wid") + - "," + sInfo.get("gc") + - "," + ts + - "," + sInfo.get("size") + - "," + inHead + "\n"); - } - } - } - - private static String firstLine(String string) { - BufferedReader reader = new 
BufferedReader(new StringReader(string)); - try { - return reader.readLine(); - } catch (IOException e) { - return string; - } finally { - closeQuietly(reader); - } - } - - private static long asLong(String string) { - return Long.valueOf(string); - } - - private static Map getSegmentInfoMap(UUID segment, SegmentTracker tracker) { - return new SegmentInfo(segment, tracker).getInfoMap(); - } - - private static String getSegmentInfo(UUID segment, SegmentTracker tracker) { - return new SegmentInfo(segment, tracker).getInfo(); - } - - private static class SegmentInfo { - private final UUID uuid; - private final SegmentTracker tracker; - - private SegmentId id; - - SegmentInfo(UUID uuid, SegmentTracker tracker) { - this.uuid = uuid; - this.tracker = tracker; - } - - boolean isData() { - return isDataSegmentId(uuid.getLeastSignificantBits()); - } - - SegmentId getSegmentId() { - if (id == null) { - id = tracker.getSegmentId( - uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); - } - return id; - } - - int getSize() { - return getSegmentId().getSegment().size(); - } - - String getInfo() { - if (isData()) { - return getSegmentId().getSegment().getSegmentInfo(); - } else { - return null; - } - } - - Map getInfoMap() { - try { - Map infoMap = newHashMap(); - String info = getInfo(); - if (info != null) { - JsopTokenizer tokenizer = new JsopTokenizer(info); - tokenizer.read('{'); - infoMap.putAll(JsonObject.create(tokenizer).getProperties()); - } - infoMap.put("size", valueOf(getSize())); - return infoMap; - } catch (SegmentNotFoundException e) { - return singletonMap("error", getStackTraceAsString(e)); - } - } - - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java deleted file mode 100644 index c16bb81..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentId.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static java.lang.Integer.getInteger; -import static java.lang.Integer.rotateLeft; - -import java.util.UUID; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Segment identifier. There are two types of segments: data segments, and bulk - * segments. Data segments have a header and may reference other segments; bulk - * segments do not. - */ -@Deprecated -public class SegmentId implements Comparable { - - /** Logger instance */ - private static final Logger log = LoggerFactory.getLogger(SegmentId.class); - - /** - * Sample rate bit mask of {@link SegmentTracker#segmentCache}. 
Lower values - * will cause more frequent accesses to that cache instead of the short - * circuit through {@link SegmentId#segment}. Access to that cache is slower - * but allows tracking access statistics. Should be 2^x - 1 (for example - * 1023, 255, 15,...). - */ - private static final int SEGMENT_CACHE_SAMPLE_MASK = getInteger("SegmentCacheSampleRate", 1023); - - /** - * The initial random value for the pseudo random number generator. Initial - * values of 0 - 0xffff will ensure a long period, but other values don't. - */ - private static volatile int random = (int) (System.currentTimeMillis() & 0xffff); - - /** - * Checks whether this is a data segment identifier. - * - * @return {@code true} for a data segment, {@code false} otherwise - */ - @Deprecated - public static boolean isDataSegmentId(long lsb) { - return (lsb >>> 60) == 0xAL; - } - - private final SegmentTracker tracker; - - private final long msb; - - private final long lsb; - - private long creationTime; - - /** - * A reference to the segment object, if it is available in memory. It is - * used for fast lookup. The segment tracker will set or reset this field. - *
<p>
- * Needs to be volatile so {@link #setSegment(Segment)} doesn't need to - * be synchronized as this would lead to deadlocks. - */ - private volatile Segment segment; - - private SegmentId(SegmentTracker tracker, long msb, long lsb, - Segment segment, long creationTime) { - this.tracker = tracker; - this.msb = msb; - this.lsb = lsb; - this.segment = segment; - this.creationTime = creationTime; - } - - @Deprecated - public SegmentId(SegmentTracker tracker, long msb, long lsb) { - this(tracker, msb, lsb, null, System.currentTimeMillis()); - } - - /** - * Checks whether this is a data segment identifier. - * - * @return {@code true} for a data segment, {@code false} otherwise - */ - @Deprecated - public boolean isDataSegmentId() { - return isDataSegmentId(lsb); - } - - /** - * Checks whether this is a bulk segment identifier. - * - * @return {@code true} for a bulk segment, {@code false} otherwise - */ - @Deprecated - public boolean isBulkSegmentId() { - return (lsb >>> 60) == 0xBL; - } - - @Deprecated - public boolean equals(long msb, long lsb) { - return this.msb == msb && this.lsb == lsb; - } - - @Deprecated - public long getMostSignificantBits() { - return msb; - } - - @Deprecated - public long getLeastSignificantBits() { - return lsb; - } - - /** - * Get a random integer. A fast, but lower quality pseudo random number - * generator is used. - * - * @return a random value. - */ - private static int randomInt() { - // There is a race here on concurrent access. However, given the usage the resulting - // bias seems preferable to the performance penalty of synchronization - return random = 0xc3e157c1 - rotateLeft(random, 19); - } - - @Deprecated - public Segment getSegment() { - // Sample the segment cache once in a while to get some cache hit/miss statistics - if ((randomInt() & SEGMENT_CACHE_SAMPLE_MASK) == 0) { - Segment segment = tracker.getCachedSegment(this); - if (segment != null) { - return segment; - } - } - - // Fall back to short circuit via this.segment if not in the cache - Segment segment = this.segment; - if (segment == null) { - synchronized (this) { - segment = this.segment; - if (segment == null) { - log.debug("Loading segment {}", this); - segment = tracker.readSegment(this); - } - } - } - return segment; - } - - void setSegment(Segment segment) { - this.segment = segment; - } - - @Deprecated - public SegmentTracker getTracker() { - return tracker; - } - - @Deprecated - public long getCreationTime() { - return creationTime; - } - - /** - * Pins this segment so it won't be cleaned by the {@code CLEAN_OLD} strategy. 
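The data/bulk distinction used throughout this class is carried entirely by the top nibble of the id's least significant 64 bits: 0xA marks a data segment and 0xB a bulk segment, exactly as the two predicates above encode. A standalone check against plain java.util.UUID values (the sample UUID and the nibble-forcing arithmetic are purely illustrative):

import java.util.UUID;

public class SegmentTypeExample {

    // Top nibble of the least-significant half encodes the segment type.
    static boolean isDataSegmentId(long lsb) {
        return (lsb >>> 60) == 0xAL;
    }

    static boolean isBulkSegmentId(long lsb) {
        return (lsb >>> 60) == 0xBL;
    }

    public static void main(String[] args) {
        UUID sample = UUID.fromString("f81d4fae-7dec-11d0-a765-00a0c91e6bf6");
        long base = sample.getLeastSignificantBits() & ~(0xFL << 60);
        long dataLsb = base | (0xAL << 60); // force the data nibble
        long bulkLsb = base | (0xBL << 60); // force the bulk nibble
        System.out.println(isDataSegmentId(dataLsb)); // true
        System.out.println(isBulkSegmentId(bulkLsb)); // true
    }
}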
- */ - void pin() { - creationTime = Long.MAX_VALUE; - } - - /** - * @return this segment id as UUID - */ - @Deprecated - public UUID asUUID() { - return new UUID(msb, lsb); - } - - // --------------------------------------------------------< Comparable >-- - - @Override - @Deprecated - public int compareTo(SegmentId that) { - int d = Long.valueOf(this.msb).compareTo(Long.valueOf(that.msb)); - if (d == 0) { - d = Long.valueOf(this.lsb).compareTo(Long.valueOf(that.lsb)); - } - return d; - } - - // ------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public String toString() { - return new UUID(msb, lsb).toString(); - } - - @Override - @Deprecated - public boolean equals(Object object) { - if (this == object) { - return true; - } else if (object instanceof SegmentId) { - SegmentId that = (SegmentId) object; - return msb == that.msb && lsb == that.lsb; - } - return false; - } - - @Override - @Deprecated - public int hashCode() { - return (int) lsb; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java deleted file mode 100644 index 8e4eeba..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTable.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMapWithExpectedSize; -import static java.util.Collections.nCopies; - -import java.lang.ref.WeakReference; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Hash table of weak references to segment identifiers. - */ -@Deprecated -public class SegmentIdTable { - - /** - * The list of weak references to segment identifiers that are currently - * being accessed. This represents a hash table that uses open addressing - * with linear probing. It is not a hash map, to speed up read access. - *
<p>
- * The size of the table is always a power of two, so that we can use - * bitwise "and" instead of modulo. - *
<p>
- * The table is indexed by the random identifier bits, which guarantees - * uniform distribution of entries. - *
<p>
- * Open addressing with linear probing is used. Each table entry is either - * null (when there are no matching identifiers), a weak references to the - * matching identifier, or a weak reference to another identifier. - * There are no tombstone entries as there is no explicit remove operation, - * but a referent can become null if the entry is garbage collected. - *
<p>
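The scheme described above is compact enough to show in isolation: because the table length stays a power of two, the home slot is a bitwise mask of the id bits, and lookup probes forward until it finds the id or an empty slot. A toy version over plain longs, without the weak references or the rebuild-at-75%-load logic of the real table:

public class ProbingTableExample {

    // Power-of-two length; 0 marks an empty slot in this toy version.
    // Assumes the table never fills up (the real class rebuilds at 75% load).
    private final long[] table = new long[1024];

    // Bitwise "and" replaces modulo because the length is a power of two.
    private int slot(long lsb) {
        return ((int) lsb) & (table.length - 1);
    }

    public void put(long id) {
        int i = slot(id);
        while (table[i] != 0) {          // linear probing on collision
            i = (i + 1) % table.length;
        }
        table[i] = id;
    }

    public boolean contains(long id) {
        int i = slot(id);
        while (table[i] != 0) {
            if (table[i] == id) {
                return true;
            }
            i = (i + 1) % table.length;  // keep walking the probe sequence
        }
        return false;                    // hit an empty slot: not present
    }
}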
- * The array is not sorted (we could; lookup might be faster, but adding - * entries would be slower). - */ - private final ArrayList> references = - newArrayList(nCopies(1024, (WeakReference) null)); - - private final SegmentTracker tracker; - - private static final Logger LOG = LoggerFactory.getLogger(SegmentIdTable.class); - - - /** - * The refresh count (for diagnostics and testing). - */ - private int rebuildCount; - - /** - * The number of used entries (WeakReferences) in this table. - */ - private int entryCount; - - SegmentIdTable(SegmentTracker tracker) { - this.tracker = tracker; - } - - /** - * Get the segment id, and reference it in the weak references map. - * - * @param msb - * @param lsb - * @return the segment id - */ - synchronized SegmentId getSegmentId(long msb, long lsb) { - int first = getIndex(lsb); - int index = first; - boolean shouldRefresh = false; - - WeakReference reference = references.get(index); - while (reference != null) { - SegmentId id = reference.get(); - if (id != null - && id.getMostSignificantBits() == msb - && id.getLeastSignificantBits() == lsb) { - return id; - } - // shouldRefresh if we have a garbage collected entry - shouldRefresh = shouldRefresh || id == null; - // open addressing / linear probing - index = (index + 1) % references.size(); - reference = references.get(index); - } - - SegmentId id = new SegmentId(tracker, msb, lsb); - references.set(index, new WeakReference(id)); - entryCount++; - if (entryCount > references.size() * 0.75) { - // more than 75% full - shouldRefresh = true; - } - if (shouldRefresh) { - refresh(); - } - return id; - } - - /** - * Returns all segment identifiers that are currently referenced in memory. - * - * @param ids referenced segment identifiers - */ - void collectReferencedIds(Collection ids) { - ids.addAll(refresh()); - } - - private synchronized Collection refresh() { - int size = references.size(); - Map> ids = - newHashMapWithExpectedSize(size); - - boolean hashCollisions = false; - boolean emptyReferences = false; - for (int i = 0; i < size; i++) { - WeakReference reference = references.get(i); - if (reference != null) { - SegmentId id = reference.get(); - if (id != null) { - ids.put(id, reference); - hashCollisions = hashCollisions || (i != getIndex(id)); - } else { - references.set(i, null); - entryCount--; - emptyReferences = true; - } - } - } - - if (entryCount != ids.size()) { - // something is wrong, possibly a concurrency problem, a SegmentId - // hashcode or equals bug, or a problem with this hash table - // algorithm - LOG.warn("Unexpected entry count mismatch, expected " + - entryCount + " got " + ids.size()); - // we fix the count, because having a wrong entry count would be - // very problematic; even worse than having a concurrency problem - entryCount = ids.size(); - } - - while (2 * ids.size() > size) { - size *= 2; - } - - // we need to re-build the table if the new size is different, - // but also if we removed some of the entries (because an entry was - // garbage collected) and there is at least one entry at the "wrong" - // location (due to open addressing) - if ((hashCollisions && emptyReferences) || size != references.size()) { - rebuildCount++; - references.clear(); - references.addAll(nCopies(size, (WeakReference) null)); - - for (Map.Entry> entry - : ids.entrySet()) { - int index = getIndex(entry.getKey()); - while (references.get(index) != null) { - index = (index + 1) % size; - } - references.set(index, entry.getValue()); - } - } - - return ids.keySet(); - } - - private int 
getIndex(SegmentId id) { - return getIndex(id.getLeastSignificantBits()); - } - - private int getIndex(long lsb) { - return ((int) lsb) & (references.size() - 1); - } - - synchronized void clearSegmentIdTables(CompactionStrategy strategy) { - int size = references.size(); - boolean dirty = false; - for (int i = 0; i < size; i++) { - WeakReference reference = references.get(i); - if (reference != null) { - SegmentId id = reference.get(); - if (id != null) { - if (strategy.canRemove(id)) { - // we clear the reference here, but we must not - // remove the reference from the list, because - // that could cause duplicate references - // (there is a unit test for this case) - reference.clear(); - dirty = true; - } - } - } - } - if (dirty) { - refresh(); - } - } - - /** - * Get the number of map rebuild operations (used for testing and diagnostics). - * - * @return the rebuild count - */ - int getMapRebuildCount() { - return rebuildCount; - } - - /** - * Get the entry count (used for testing and diagnostics). - * - * @return the entry count - */ - int getEntryCount() { - return entryCount; - } - - /** - * Get the size of the internal map (used for testing and diagnostics). - * - * @return the map size - */ - int getMapSize() { - return references.size(); - } - - /** - * Get the raw list of segment ids (used for testing). - * - * @return the raw list - */ - List getRawSegmentIdList() { - ArrayList list = new ArrayList(); - for (WeakReference ref : references) { - if (ref != null) { - SegmentId id = ref.get(); - if (id != null) { - list.add(id); - } - } - } - return list; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeBuilder.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeBuilder.java deleted file mode 100644 index 5ec655d..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeBuilder.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import java.io.IOException; -import java.io.InputStream; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A node builder that keeps track of the number of updates - * (set property calls and so on). If there are too many updates, - * getNodeState() is called, which will write the records to the segment, - * and that might persist the changes (if the segment is flushed). 
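The auto-purge contract in that comment reduces to a counter: a root builder counts content updates and forces a flush once a system-property-tunable limit is crossed, while child builders stay at -1 and delegate upward. A skeletal illustration of just that counting logic, with the flush stubbed out where the real class calls getNodeState():

public class UpdateLimitExample {

    // Same default and override knob as the deleted builder.
    private static final int UPDATE_LIMIT = Integer.getInteger("update.limit", 10000);

    // >= 0 on a root builder; the deleted code uses -1 to mark child builders.
    private long updateCount = 0;

    void updated() {
        updateCount++;
        if (updateCount > UPDATE_LIMIT) {
            flush();
        }
    }

    void flush() {
        // stand-in for getNodeState(): write pending records, then reset
        updateCount = 0;
    }
}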
- */ -@Deprecated -public class SegmentNodeBuilder extends MemoryNodeBuilder { - private static final Logger LOG = LoggerFactory.getLogger(SegmentNodeBuilder.class); - - /** - * Number of content updates that need to happen before the updates - * are automatically purged to the underlying segments. - */ - private static final int UPDATE_LIMIT = - Integer.getInteger("update.limit", 10000); - - private final SegmentWriter writer; - - /** - * Local update counter for the root builder. - * - * The value encodes both the counter and the type of the node builder: - *
<ul>
- * <li>value >= {@code 0} represents a root builder (builder keeps
- * counter updates)</li>
- * <li>value = {@code -1} represents a child builder (value doesn't
- * change, builder doesn't keep an updated counter)</li>
- * </ul>
- * - */ - private long updateCount; - - SegmentNodeBuilder(SegmentNodeState base) { - this(base, base.getTracker().getWriter()); - } - - SegmentNodeBuilder(SegmentNodeState base, SegmentWriter writer) { - super(base); - this.writer = writer; - this.updateCount = 0; - } - - private SegmentNodeBuilder(SegmentNodeBuilder parent, String name, - SegmentWriter writer) { - super(parent, name); - this.writer = writer; - this.updateCount = -1; - } - - /** - * @return {@code true} iff this builder has been acquired from a root node state. - */ - boolean isRootBuilder() { - return isRoot(); - } - - //-------------------------------------------------< MemoryNodeBuilder >-- - - @Override - protected void updated() { - if (isChildBuilder()) { - super.updated(); - } else { - updateCount++; - if (updateCount > UPDATE_LIMIT) { - getNodeState(); - } - } - } - - private boolean isChildBuilder() { - return updateCount < 0; - } - - //-------------------------------------------------------< NodeBuilder >-- - - @Nonnull - @Override - @Deprecated - public SegmentNodeState getNodeState() { - try { - NodeState state = super.getNodeState(); - SegmentNodeState sstate = writer.writeNode(state); - if (state != sstate) { - set(sstate); - updateCount = 0; - } - return sstate; - } catch (IOException e) { - LOG.error("Error flushing changes", e); - throw new IllegalStateException("Unexpected IOException", e); - } - } - - @Override - protected MemoryNodeBuilder createChildBuilder(String name) { - return new SegmentNodeBuilder(this, name, writer); - } - - @Override - @Deprecated - public Blob createBlob(InputStream stream) throws IOException { - SegmentNodeState sns = getNodeState(); - return sns.getTracker().getWriter().writeStream(stream); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java deleted file mode 100644 index 3aadb74..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java +++ /dev/null @@ -1,645 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; -import static org.apache.jackrabbit.JcrConstants.JCR_MIXINTYPES; -import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE; -import static org.apache.jackrabbit.oak.api.Type.BOOLEAN; -import static org.apache.jackrabbit.oak.api.Type.LONG; -import static org.apache.jackrabbit.oak.api.Type.NAME; -import static org.apache.jackrabbit.oak.api.Type.NAMES; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.api.Type.STRINGS; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.MISSING_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.spi.state.AbstractNodeState.checkValidName; - -import java.util.Collections; -import java.util.List; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState; -import org.apache.jackrabbit.oak.plugins.memory.MemoryChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.AbstractNodeState; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; - -/** - * A record of type "NODE". This class can read a node record from a segment. It - * currently doesn't cache data (but the template is fully loaded). 
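Callers never see the record-level plumbing that follows; they use the generic NodeState accessors that this class merely implements more efficiently. A consumer-side sketch against Oak's in-memory node state, which honors the same contract of returning null or a default for missing or wrongly typed properties:

import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeState;

public class TypedAccessExample {

    public static void main(String[] args) {
        NodeBuilder builder = EmptyNodeState.EMPTY_NODE.builder();
        builder.setProperty("title", "hello");
        builder.setProperty("count", 42L);
        NodeState state = builder.getNodeState();

        System.out.println(state.getString("title"));    // "hello"
        System.out.println(state.getLong("count"));      // 42
        System.out.println(state.getString("missing"));  // null, not an exception
        System.out.println(state.getLong("title"));      // 0: wrong type degrades quietly
    }
}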
- */ -@Deprecated -public class SegmentNodeState extends Record implements NodeState { - - private volatile RecordId templateId = null; - - private volatile Template template = null; - - @Deprecated - public SegmentNodeState(RecordId id) { - super(id); - } - - RecordId getTemplateId() { - if (templateId == null) { - // no problem if updated concurrently, - // as each concurrent thread will just get the same value - templateId = getSegment().readRecordId(getOffset(0)); - } - return templateId; - } - - Template getTemplate() { - if (template == null) { - // no problem if updated concurrently, - // as each concurrent thread will just get the same value - template = getSegment().readTemplate(getTemplateId()); - } - return template; - } - - MapRecord getChildNodeMap() { - Segment segment = getSegment(); - return segment.readMap(segment.readRecordId(getOffset(0, 1))); - } - - @Override - @Deprecated - public boolean exists() { - return true; - } - - @Override - @Deprecated - public long getPropertyCount() { - Template template = getTemplate(); - long count = template.getPropertyTemplates().length; - if (template.getPrimaryType() != null) { - count++; - } - if (template.getMixinTypes() != null) { - count++; - } - return count; - } - - @Override - @Deprecated - public boolean hasProperty(@Nonnull String name) { - checkNotNull(name); - Template template = getTemplate(); - if (JCR_PRIMARYTYPE.equals(name)) { - return template.getPrimaryType() != null; - } else if (JCR_MIXINTYPES.equals(name)) { - return template.getMixinTypes() != null; - } else { - return template.getPropertyTemplate(name) != null; - } - } - - @Override @CheckForNull - @Deprecated - public PropertyState getProperty(@Nonnull String name) { - checkNotNull(name); - Template template = getTemplate(); - PropertyState property = null; - if (JCR_PRIMARYTYPE.equals(name)) { - property = template.getPrimaryType(); - } else if (JCR_MIXINTYPES.equals(name)) { - property = template.getMixinTypes(); - } - if (property != null) { - return property; - } - - PropertyTemplate propertyTemplate = - template.getPropertyTemplate(name); - if (propertyTemplate != null) { - Segment segment = getSegment(); - RecordId id; - if (getSegment().getSegmentVersion().onOrAfter(V_11)) { - id = getRecordIdV11(segment, template, propertyTemplate); - } else { - id = getRecordIdV10(segment, template, propertyTemplate); - } - return new SegmentPropertyState(id, propertyTemplate); - } else { - return null; - } - } - - private RecordId getRecordIdV10(Segment segment, Template template, - PropertyTemplate propertyTemplate) { - int ids = 1 + propertyTemplate.getIndex(); - if (template.getChildName() != Template.ZERO_CHILD_NODES) { - ids++; - } - return segment.readRecordId(getOffset(0, ids)); - } - - private RecordId getRecordIdV11(Segment segment, Template template, - PropertyTemplate propertyTemplate) { - int ids = 1; - if (template.getChildName() != Template.ZERO_CHILD_NODES) { - ids++; - } - RecordId rid = segment.readRecordId(getOffset(0, ids)); - ListRecord pIds = new ListRecord(rid, - template.getPropertyTemplates().length); - return pIds.getEntry(propertyTemplate.getIndex()); - } - - @Override @Nonnull - @Deprecated - public Iterable getProperties() { - Template template = getTemplate(); - PropertyTemplate[] propertyTemplates = template.getPropertyTemplates(); - List list = - newArrayListWithCapacity(propertyTemplates.length + 2); - - PropertyState primaryType = template.getPrimaryType(); - if (primaryType != null) { - list.add(primaryType); - } - - PropertyState 
mixinTypes = template.getMixinTypes(); - if (mixinTypes != null) { - list.add(mixinTypes); - } - - Segment segment = getSegment(); - int ids = 1; - if (template.getChildName() != Template.ZERO_CHILD_NODES) { - ids++; - } - - if (segment.getSegmentVersion().onOrAfter(V_11)) { - if (propertyTemplates.length > 0) { - ListRecord pIds = new ListRecord( - segment.readRecordId(getOffset(0, ids)), - propertyTemplates.length); - for (int i = 0; i < propertyTemplates.length; i++) { - RecordId propertyId = pIds.getEntry(i); - list.add(new SegmentPropertyState(propertyId, - propertyTemplates[i])); - } - } - } else { - for (int i = 0; i < propertyTemplates.length; i++) { - RecordId propertyId = segment.readRecordId(getOffset(0, ids++)); - list.add(new SegmentPropertyState(propertyId, - propertyTemplates[i])); - } - } - - return list; - } - - @Override - @Deprecated - public boolean getBoolean(@Nonnull String name) { - return Boolean.TRUE.toString().equals(getValueAsString(name, BOOLEAN)); - } - - @Override - @Deprecated - public long getLong(String name) { - String value = getValueAsString(name, LONG); - if (value != null) { - return Long.parseLong(value); - } else { - return 0; - } - } - - @Override @CheckForNull - @Deprecated - public String getString(String name) { - return getValueAsString(name, STRING); - } - - @Override @Nonnull - @Deprecated - public Iterable getStrings(@Nonnull String name) { - return getValuesAsStrings(name, STRINGS); - } - - @Override @CheckForNull - @Deprecated - public String getName(@Nonnull String name) { - return getValueAsString(name, NAME); - } - - @Override @Nonnull - @Deprecated - public Iterable getNames(@Nonnull String name) { - return getValuesAsStrings(name, NAMES); - } - - /** - * Optimized value access method. Returns the string value of a property - * of a given non-array type. Returns {@code null} if the named property - * does not exist, or is of a different type than given. - * - * @param name property name - * @param type property type - * @return string value of the property, or {@code null} - */ - @CheckForNull - private String getValueAsString(String name, Type type) { - checkArgument(!type.isArray()); - - Template template = getTemplate(); - if (JCR_PRIMARYTYPE.equals(name)) { - PropertyState primary = template.getPrimaryType(); - if (primary != null) { - if (type == NAME) { - return primary.getValue(NAME); - } else { - return null; - } - } - } else if (JCR_MIXINTYPES.equals(name) - && template.getMixinTypes() != null) { - return null; - } - - PropertyTemplate propertyTemplate = - template.getPropertyTemplate(name); - if (propertyTemplate == null - || propertyTemplate.getType() != type) { - return null; - } - - Segment segment = getSegment(); - RecordId id; - if (getSegment().getSegmentVersion().onOrAfter(V_11)) { - id = getRecordIdV11(segment, template, propertyTemplate); - } else { - id = getRecordIdV10(segment, template, propertyTemplate); - } - return Segment.readString(id); - } - - /** - * Optimized value access method. Returns the string values of a property - * of a given array type. Returns an empty iterable if the named property - * does not exist, or is of a different type than given. 
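The long compareAgainstBaseState implementation further down is an optimization of a generic contract: the observable behavior is the sequence of NodeStateDiff callbacks, which any NodeState honors. A consumer-side sketch using the in-memory state and Oak's DefaultNodeStateDiff (whose methods default to returning true, meaning "continue comparing"):

import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeState;

public class DiffExample {

    public static void main(String[] args) {
        NodeState before = EmptyNodeState.EMPTY_NODE;
        NodeBuilder builder = before.builder();
        builder.setProperty("jcr:primaryType", "nt:unstructured");
        builder.child("content");
        NodeState after = builder.getNodeState();

        // Callbacks fire per added/changed/deleted property and child node;
        // returning true keeps the comparison going.
        after.compareAgainstBaseState(before, new DefaultNodeStateDiff() {
            @Override
            public boolean propertyAdded(PropertyState p) {
                System.out.println("property added: " + p.getName());
                return true;
            }

            @Override
            public boolean childNodeAdded(String name, NodeState child) {
                System.out.println("child added: " + name);
                return true;
            }
        });
    }
}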
- * - * @param name property name - * @param type property type - * @return string values of the property, or an empty iterable - */ - @Nonnull - private Iterable getValuesAsStrings(String name, Type type) { - checkArgument(type.isArray()); - - Template template = getTemplate(); - if (JCR_MIXINTYPES.equals(name)) { - PropertyState mixin = template.getMixinTypes(); - if (type == NAMES && mixin != null) { - return mixin.getValue(NAMES); - } else if (type == NAMES || mixin != null) { - return emptyList(); - } - } else if (JCR_PRIMARYTYPE.equals(name) - && template.getPrimaryType() != null) { - return emptyList(); - } - - PropertyTemplate propertyTemplate = - template.getPropertyTemplate(name); - if (propertyTemplate == null - || propertyTemplate.getType() != type) { - return emptyList(); - } - - Segment segment = getSegment(); - RecordId id; - if (getSegment().getSegmentVersion().onOrAfter(V_11)) { - id = getRecordIdV11(segment, template, propertyTemplate); - } else { - id = getRecordIdV10(segment, template, propertyTemplate); - } - segment = id.getSegment(); - int size = segment.readInt(id.getOffset()); - if (size == 0) { - return emptyList(); - } - - id = segment.readRecordId(id.getOffset() + 4); - if (size == 1) { - return singletonList(Segment.readString(id)); - } - - List values = newArrayListWithCapacity(size); - ListRecord list = new ListRecord(id, size); - for (RecordId value : list.getEntries()) { - values.add(Segment.readString(value)); - } - return values; - } - - @Override - @Deprecated - public long getChildNodeCount(long max) { - String childName = getTemplate().getChildName(); - if (childName == Template.ZERO_CHILD_NODES) { - return 0; - } else if (childName == Template.MANY_CHILD_NODES) { - return getChildNodeMap().size(); - } else { - return 1; - } - } - - @Override - @Deprecated - public boolean hasChildNode(@Nonnull String name) { - String childName = getTemplate().getChildName(); - if (childName == Template.ZERO_CHILD_NODES) { - return false; - } else if (childName == Template.MANY_CHILD_NODES) { - return getChildNodeMap().getEntry(name) != null; - } else { - return childName.equals(name); - } - } - - @Override @Nonnull - @Deprecated - public NodeState getChildNode(@Nonnull String name) { - String childName = getTemplate().getChildName(); - if (childName == Template.MANY_CHILD_NODES) { - MapEntry child = getChildNodeMap().getEntry(name); - if (child != null) { - return child.getNodeState(); - } - } else if (childName != Template.ZERO_CHILD_NODES - && childName.equals(name)) { - Segment segment = getSegment(); - RecordId childNodeId = segment.readRecordId(getOffset(0, 1)); - return new SegmentNodeState(childNodeId); - } - checkValidName(name); - return MISSING_NODE; - } - - @Override @Nonnull - @Deprecated - public Iterable getChildNodeNames() { - String childName = getTemplate().getChildName(); - if (childName == Template.ZERO_CHILD_NODES) { - return Collections.emptyList(); - } else if (childName == Template.MANY_CHILD_NODES) { - return getChildNodeMap().getKeys(); - } else { - return Collections.singletonList(childName); - } - } - - @Override @Nonnull - @Deprecated - public Iterable getChildNodeEntries() { - String childName = getTemplate().getChildName(); - if (childName == Template.ZERO_CHILD_NODES) { - return Collections.emptyList(); - } else if (childName == Template.MANY_CHILD_NODES) { - return getChildNodeMap().getEntries(); - } else { - Segment segment = getSegment(); - RecordId childNodeId = segment.readRecordId(getOffset(0, 1)); - return 
Collections.singletonList(new MemoryChildNodeEntry( - childName, new SegmentNodeState(childNodeId))); - } - } - - @Override @Nonnull - @Deprecated - public SegmentNodeBuilder builder() { - return new SegmentNodeBuilder(this); - } - - @Override - @Deprecated - public boolean compareAgainstBaseState(NodeState base, NodeStateDiff diff) { - if (this == base || fastEquals(this, base)) { - return true; // no changes - } else if (base == EMPTY_NODE || !base.exists()) { // special case - return EmptyNodeState.compareAgainstEmptyState(this, diff); - } else if (!(base instanceof SegmentNodeState)) { // fallback - return AbstractNodeState.compareAgainstBaseState(this, base, diff); - } - - SegmentNodeState that = (SegmentNodeState) base; - if (that.wasCompactedTo(this)) { - return true; // no changes during compaction - } - - Template beforeTemplate = that.getTemplate(); - RecordId beforeId = that.getRecordId(); - - Template afterTemplate = getTemplate(); - RecordId afterId = getRecordId(); - - // Compare type properties - if (!compareProperties( - beforeTemplate.getPrimaryType(), afterTemplate.getPrimaryType(), - diff)) { - return false; - } - if (!compareProperties( - beforeTemplate.getMixinTypes(), afterTemplate.getMixinTypes(), - diff)) { - return false; - } - - // Compare other properties, leveraging the ordering - int beforeIndex = 0; - int afterIndex = 0; - PropertyTemplate[] beforeProperties = - beforeTemplate.getPropertyTemplates(); - PropertyTemplate[] afterProperties = - afterTemplate.getPropertyTemplates(); - while (beforeIndex < beforeProperties.length - && afterIndex < afterProperties.length) { - int d = Integer.valueOf(afterProperties[afterIndex].hashCode()) - .compareTo(Integer.valueOf(beforeProperties[beforeIndex].hashCode())); - if (d == 0) { - d = afterProperties[afterIndex].getName().compareTo( - beforeProperties[beforeIndex].getName()); - } - PropertyState beforeProperty = null; - PropertyState afterProperty = null; - if (d < 0) { - afterProperty = - afterTemplate.getProperty(afterId, afterIndex++); - } else if (d > 0) { - beforeProperty = - beforeTemplate.getProperty(beforeId, beforeIndex++); - } else { - afterProperty = - afterTemplate.getProperty(afterId, afterIndex++); - beforeProperty = - beforeTemplate.getProperty(beforeId, beforeIndex++); - } - if (!compareProperties(beforeProperty, afterProperty, diff)) { - return false; - } - } - while (afterIndex < afterProperties.length) { - if (!diff.propertyAdded( - afterTemplate.getProperty(afterId, afterIndex++))) { - return false; - } - } - while (beforeIndex < beforeProperties.length) { - PropertyState beforeProperty = - beforeTemplate.getProperty(beforeId, beforeIndex++); - if (!diff.propertyDeleted(beforeProperty)) { - return false; - } - } - - String beforeChildName = beforeTemplate.getChildName(); - String afterChildName = afterTemplate.getChildName(); - if (afterChildName == Template.ZERO_CHILD_NODES) { - if (beforeChildName != Template.ZERO_CHILD_NODES) { - for (ChildNodeEntry entry - : beforeTemplate.getChildNodeEntries(beforeId)) { - if (!diff.childNodeDeleted( - entry.getName(), entry.getNodeState())) { - return false; - } - } - } - } else if (afterChildName != Template.MANY_CHILD_NODES) { - NodeState afterNode = - afterTemplate.getChildNode(afterChildName, afterId); - NodeState beforeNode = - beforeTemplate.getChildNode(afterChildName, beforeId); - if (!beforeNode.exists()) { - if (!diff.childNodeAdded(afterChildName, afterNode)) { - return false; - } - } else if (!fastEquals(afterNode, beforeNode)) { - if 
(!diff.childNodeChanged( - afterChildName, beforeNode, afterNode)) { - return false; - } - } - if (beforeChildName == Template.MANY_CHILD_NODES - || (beforeChildName != Template.ZERO_CHILD_NODES - && !beforeNode.exists())) { - for (ChildNodeEntry entry - : beforeTemplate.getChildNodeEntries(beforeId)) { - if (!afterChildName.equals(entry.getName())) { - if (!diff.childNodeDeleted( - entry.getName(), entry.getNodeState())) { - return false; - } - } - } - } - } else if (beforeChildName == Template.ZERO_CHILD_NODES) { - for (ChildNodeEntry entry - : afterTemplate.getChildNodeEntries(afterId)) { - if (!diff.childNodeAdded( - entry.getName(), entry.getNodeState())) { - return false; - } - } - } else if (beforeChildName != Template.MANY_CHILD_NODES) { - boolean beforeChildRemoved = true; - NodeState beforeChild = - beforeTemplate.getChildNode(beforeChildName, beforeId); - for (ChildNodeEntry entry - : afterTemplate.getChildNodeEntries(afterId)) { - String childName = entry.getName(); - NodeState afterChild = entry.getNodeState(); - if (beforeChildName.equals(childName)) { - beforeChildRemoved = false; - if (!fastEquals(afterChild, beforeChild) - && !diff.childNodeChanged( - childName, beforeChild, afterChild)) { - return false; - } - } else if (!diff.childNodeAdded(childName, afterChild)) { - return false; - } - } - if (beforeChildRemoved) { - if (!diff.childNodeDeleted(beforeChildName, beforeChild)) { - return false; - } - } - } else { - MapRecord afterMap = afterTemplate.getChildNodeMap(afterId); - MapRecord beforeMap = beforeTemplate.getChildNodeMap(beforeId); - return afterMap.compare(beforeMap, diff); - } - - return true; - } - - private static boolean compareProperties( - PropertyState before, PropertyState after, NodeStateDiff diff) { - if (before == null) { - return after == null || diff.propertyAdded(after); - } else if (after == null) { - return diff.propertyDeleted(before); - } else { - return before.equals(after) || diff.propertyChanged(before, after); - } - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public boolean equals(Object object) { - if (this == object || fastEquals(this, object)) { - return true; - } else if (object instanceof SegmentNodeState) { - SegmentNodeState that = (SegmentNodeState) object; - Template template = getTemplate(); - return template.equals(that.getTemplate()) - && template.compare(getRecordId(), that.getRecordId()); - } else { - return object instanceof NodeState - && AbstractNodeState.equals(this, (NodeState) object); // TODO - } - } - - @Override - @Deprecated - public String toString() { - return AbstractNodeState.toString(this); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java deleted file mode 100644 index b6cfd35..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java +++ /dev/null @@ -1,683 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Maps.newHashMap; -import static java.lang.System.currentTimeMillis; -import static java.lang.Thread.currentThread; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.NANOSECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.jackrabbit.oak.api.Type.LONG; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.plugins.segment.Record.fastEquals; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.GAIN_THRESHOLD_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.NO_COMPACTION; - -import java.io.Closeable; -import java.io.IOException; -import java.io.InputStream; -import java.util.Collections; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.commit.ChangeDispatcher; -import org.apache.jackrabbit.oak.spi.commit.CommitHook; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.Observable; -import org.apache.jackrabbit.oak.spi.commit.Observer; -import org.apache.jackrabbit.oak.spi.state.ConflictAnnotatingRebaseDiff; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The top level class for the segment store. - *

- * The root node of the JCR content tree is actually stored in the node "/root", - * and checkpoints are stored under "/checkpoints". - */ -@Deprecated -public class SegmentNodeStore implements NodeStore, Observable { - - // FIXME OAK-4449: SegmentNodeStore and SegmentStore builders should log their parameters on build() - @Deprecated - public static class SegmentNodeStoreBuilder { - - private final SegmentStore store; - - private boolean isCreated; - - private CompactionStrategy compactionStrategy = NO_COMPACTION; - - private volatile SegmentNodeStore segmentNodeStore; - - private SegmentNodeStoreBuilder(@Nonnull SegmentStore store) { - this.store = store; - } - - SegmentNodeStoreBuilder withCompactionStrategy(CompactionStrategy compactionStrategy) { - this.compactionStrategy = compactionStrategy; - return this; - } - - SegmentNodeStoreBuilder withCompactionStrategy( - boolean pauseCompaction, - boolean cloneBinaries, - String cleanup, - long cleanupTs, - byte memoryThreshold, - final int lockWaitTime, - int retryCount, - boolean forceAfterFail, - boolean persistCompactionMap, - byte gainThreshold) { - - compactionStrategy = new CompactionStrategy( - pauseCompaction, - cloneBinaries, - CompactionStrategy.CleanupType.valueOf(cleanup), - cleanupTs, - memoryThreshold) { - - @Override - public boolean compacted(Callable setHead) throws Exception { - // Need to guard against concurrent commits to avoid - // mixed segments. See OAK-2192. - return segmentNodeStore.locked(setHead, lockWaitTime, SECONDS); - } - - }; - - compactionStrategy.setRetryCount(retryCount); - compactionStrategy.setForceAfterFail(forceAfterFail); - compactionStrategy.setPersistCompactionMap(persistCompactionMap); - compactionStrategy.setGainThreshold(gainThreshold); - - return this; - } - - CompactionStrategy getCompactionStrategy() { - checkState(isCreated); - return compactionStrategy; - } - - @Nonnull - @Deprecated - public SegmentNodeStore build() { - checkState(!isCreated); - isCreated = true; - segmentNodeStore = new SegmentNodeStore(this); - return segmentNodeStore; - } - - } - - @Nonnull - @Deprecated - public static SegmentNodeStoreBuilder builder(@Nonnull SegmentStore store) { - return new SegmentNodeStoreBuilder(checkNotNull(store)); - } - - private static final Logger log = LoggerFactory.getLogger(SegmentNodeStore.class); - - static final String ROOT = "root"; - - @Deprecated - public static final String CHECKPOINTS = "checkpoints"; - - private final SegmentStore store; - - private final ChangeDispatcher changeDispatcher; - - /** - * Local copy of the head of the journal associated with this store. - */ - private final AtomicReference head; - - /** - * Semaphore that controls access to the {@link #head} variable. - * Only a single local commit is allowed at a time. When such - * a commit is in progress, no external updates will be seen. 
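The builder above is the deprecated oak-segment entry point that this patch deletes; callers elsewhere in the patch are moved to the oak-segment-tar builders. A minimal migration sketch, assuming oak-segment-tar is on the classpath (the wrapper class name and directory argument are illustrative; the two builder calls are the ones this patch substitutes in the test fixtures):

    import java.io.File;
    import java.io.IOException;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
    import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    class SegmentTarMigration {

        // oak-segment-tar equivalent of the deprecated
        // SegmentNodeStore.builder(store).build() shown above
        static NodeStore openNodeStore(File dir)
                throws IOException, InvalidFileStoreVersionException {
            FileStore store = FileStoreBuilder.fileStoreBuilder(dir).build();
            return SegmentNodeStoreBuilders.builder(store).build();
        }
    }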
- */ - private final Semaphore commitSemaphore; - - private long maximumBackoff = MILLISECONDS.convert(10, SECONDS); - - /** - * Sets the number of seconds to wait for the attempt to grab the lock to - * create a checkpoint - */ - private int checkpointsLockWaitTime = Integer.getInteger( - "oak.checkpoints.lockWaitTime", 10); - - /** - * Flag controlling the commit lock fairness - */ - private static final boolean commitFairLock = Boolean - .parseBoolean(System.getProperty("oak.segmentNodeStore.commitFairLock", "true")); - - private SegmentNodeStore(SegmentNodeStoreBuilder builder) { - if (commitFairLock) { - log.info("Initializing SegmentNodeStore with the commitFairLock option enabled."); - } - this.commitSemaphore = new Semaphore(1, commitFairLock); - this.store = builder.store; - this.head = new AtomicReference(store.getHead()); - this.changeDispatcher = new ChangeDispatcher(getRoot()); - } - - void setMaximumBackoff(long max) { - this.maximumBackoff = max; - } - - /** - * Execute the passed callable while trying to acquire this store's commit lock. - * @param c callable to execute - * @return {@code false} if the store's commit lock cannot be acquired, the result - * of {@code c.call()} otherwise. - * @throws Exception - */ - boolean locked(Callable c) throws Exception { - if (commitSemaphore.tryAcquire()) { - try { - return c.call(); - } finally { - commitSemaphore.release(); - } - } - return false; - } - - /** - * Execute the passed callable while trying to acquire this store's commit lock. - * @param timeout the maximum time to wait for the store's commit lock - * @param unit the time unit of the {@code timeout} argument - * @param c callable to execute - * @return {@code false} if the store's commit lock cannot be acquired, the result - * of {@code c.call()} otherwise. - * @throws Exception - */ - boolean locked(Callable c, long timeout, TimeUnit unit) throws Exception { - if (commitSemaphore.tryAcquire(timeout, unit)) { - try { - return c.call(); - } finally { - // Explicitly give up references to the previous root state - // otherwise they would block cleanup. See OAK-3347 - refreshHead(true); - commitSemaphore.release(); - } - } - return false; - } - - /** - * Refreshes the head state. Should only be called while holding a - * permit from the {@link #commitSemaphore}.
- */ - private void refreshHead(boolean dispatchChanges) { - SegmentNodeState state = store.getHead(); - if (!state.getRecordId().equals(head.get().getRecordId())) { - head.set(state); - if (dispatchChanges) { - changeDispatcher.contentChanged(state.getChildNode(ROOT), CommitInfo.EMPTY_EXTERNAL); - } - } - } - - @Override - @Deprecated - public Closeable addObserver(Observer observer) { - return changeDispatcher.addObserver(observer); - } - - @Override @Nonnull - @Deprecated - public NodeState getRoot() { - if (commitSemaphore.tryAcquire()) { - try { - refreshHead(true); - } finally { - commitSemaphore.release(); - } - } - return head.get().getChildNode(ROOT); - } - - @Nonnull - @Deprecated - public NodeState getSuperRoot() { - if (commitSemaphore.tryAcquire()) { - try { - refreshHead(true); - } finally { - commitSemaphore.release(); - } - } - return head.get(); - } - - @Override - @Deprecated - public NodeState merge( - @Nonnull NodeBuilder builder, @Nonnull CommitHook commitHook, - @Nonnull CommitInfo info) throws CommitFailedException { - checkArgument(builder instanceof SegmentNodeBuilder); - SegmentNodeBuilder snb = (SegmentNodeBuilder) builder; - checkArgument(snb.isRootBuilder()); - checkNotNull(commitHook); - - try { - commitSemaphore.acquire(); - try { - Commit commit = new Commit(snb, commitHook, info); - NodeState merged = commit.execute(); - snb.reset(merged); - return merged; - } finally { - commitSemaphore.release(); - } - } catch (InterruptedException e) { - currentThread().interrupt(); - throw new CommitFailedException( - "Segment", 2, "Merge interrupted", e); - } catch (SegmentOverflowException e) { - throw new CommitFailedException( - "Segment", 3, "Merge failed", e); - } - } - - @Override @Nonnull - @Deprecated - public NodeState rebase(@Nonnull NodeBuilder builder) { - checkArgument(builder instanceof SegmentNodeBuilder); - - SegmentNodeBuilder snb = (SegmentNodeBuilder) builder; - - NodeState root = getRoot(); - NodeState before = snb.getBaseState(); - if (!fastEquals(before, root)) { - SegmentNodeState after = snb.getNodeState(); - snb.reset(root); - after.compareAgainstBaseState( - before, new ConflictAnnotatingRebaseDiff(snb)); - } - - return snb.getNodeState(); - } - - @Override @Nonnull - @Deprecated - public NodeState reset(@Nonnull NodeBuilder builder) { - checkArgument(builder instanceof SegmentNodeBuilder); - - SegmentNodeBuilder snb = (SegmentNodeBuilder) builder; - - NodeState root = getRoot(); - snb.reset(root); - - return root; - } - - @Override - @Deprecated - public Blob createBlob(InputStream stream) throws IOException { - return store.getTracker().getWriter().writeStream(stream); - } - - @Override - @Deprecated - public Blob getBlob(@Nonnull String reference) { - //Use of 'reference' here is a bit overloaded.
In terms of NodeStore API - //a blob reference refers to the secure reference obtained from Blob#getReference() - //However in SegmentStore terminology a blob is referred via 'external reference' - //That 'external reference' would map to blobId obtained from BlobStore#getBlobId - BlobStore blobStore = store.getBlobStore(); - if (blobStore != null) { - String blobId = blobStore.getBlobId(reference); - if (blobId != null) { - return store.readBlob(blobId); - } - return null; - } - throw new IllegalStateException("Attempt to read external blob with blobId [" + reference + "] " + - "without specifying BlobStore"); - } - - @Nonnull - @Override - @Deprecated - public String checkpoint(long lifetime, @Nonnull Map properties) { - checkArgument(lifetime > 0); - checkNotNull(properties); - String name = UUID.randomUUID().toString(); - try { - CPCreator cpc = new CPCreator(name, lifetime, properties); - if (locked(cpc, checkpointsLockWaitTime, TimeUnit.SECONDS)) { - return name; - } - log.warn("Failed to create checkpoint {} in {} seconds.", name, - checkpointsLockWaitTime); - } catch (InterruptedException e) { - currentThread().interrupt(); - log.error("Failed to create checkpoint {}.", name, e); - } catch (Exception e) { - log.error("Failed to create checkpoint {}.", name, e); - } - return name; - } - - private final class CPCreator implements Callable { - - private final String name; - private final long lifetime; - private final Map properties; - - CPCreator(String name, long lifetime, Map properties) { - this.name = name; - this.lifetime = lifetime; - this.properties = properties; - } - - @Override - public Boolean call() { - long now = System.currentTimeMillis(); - - refreshHead(true); - - SegmentNodeState state = head.get(); - SegmentNodeBuilder builder = state.builder(); - - NodeBuilder checkpoints = builder.child("checkpoints"); - for (String n : checkpoints.getChildNodeNames()) { - NodeBuilder cp = checkpoints.getChildNode(n); - PropertyState ts = cp.getProperty("timestamp"); - if (ts == null || ts.getType() != LONG - || now > ts.getValue(LONG)) { - cp.remove(); - } - } - - NodeBuilder cp = checkpoints.child(name); - if (Long.MAX_VALUE - now > lifetime) { - cp.setProperty("timestamp", now + lifetime); - } else { - cp.setProperty("timestamp", Long.MAX_VALUE); - } - cp.setProperty("created", now); - - NodeBuilder props = cp.setChildNode("properties"); - for (Entry p : properties.entrySet()) { - props.setProperty(p.getKey(), p.getValue()); - } - cp.setChildNode(ROOT, state.getChildNode(ROOT)); - - SegmentNodeState newState = builder.getNodeState(); - if (store.setHead(state, newState)) { - refreshHead(false); - return true; - } else { - return false; - } - } - } - - @Override @Nonnull - @Deprecated - public synchronized String checkpoint(long lifetime) { - return checkpoint(lifetime, Collections.emptyMap()); - } - - @Nonnull - @Override - @Deprecated - public Map checkpointInfo(@Nonnull String checkpoint) { - Map properties = newHashMap(); - checkNotNull(checkpoint); - NodeState cp = head.get() - .getChildNode("checkpoints") - .getChildNode(checkpoint) - .getChildNode("properties"); - - for (PropertyState prop : cp.getProperties()) { - properties.put(prop.getName(), prop.getValue(STRING)); - } - - return properties; - } - - @Nonnull - @Override - @Deprecated - public Iterable checkpoints() { - return getCheckpoints().getChildNodeNames(); - } - - @Override @CheckForNull - @Deprecated - public NodeState retrieve(@Nonnull String checkpoint) { - checkNotNull(checkpoint); - NodeState cp = 
head.get() - .getChildNode("checkpoints") - .getChildNode(checkpoint) - .getChildNode(ROOT); - if (cp.exists()) { - return cp; - } - return null; - } - - @Override - @Deprecated - public boolean release(@Nonnull String checkpoint) { - checkNotNull(checkpoint); - - // try 5 times - for (int i = 0; i < 5; i++) { - if (commitSemaphore.tryAcquire()) { - try { - refreshHead(true); - - SegmentNodeState state = head.get(); - SegmentNodeBuilder builder = state.builder(); - - NodeBuilder cp = builder.child("checkpoints").child( - checkpoint); - if (cp.exists()) { - cp.remove(); - SegmentNodeState newState = builder.getNodeState(); - if (store.setHead(state, newState)) { - refreshHead(false); - return true; - } - } - } finally { - commitSemaphore.release(); - } - } - } - return false; - } - - NodeState getCheckpoints() { - return head.get().getChildNode(CHECKPOINTS); - } - - private class Commit { - - private final Random random = new Random(); - - private final NodeState before; - - private final SegmentNodeState after; - - private final CommitHook hook; - - private final CommitInfo info; - - Commit(@Nonnull SegmentNodeBuilder builder, - @Nonnull CommitHook hook, @Nonnull CommitInfo info) { - checkNotNull(builder); - this.before = builder.getBaseState(); - this.after = builder.getNodeState(); - - this.hook = checkNotNull(hook); - this.info = checkNotNull(info); - } - - private boolean setHead(SegmentNodeState before, SegmentNodeState after) { - refreshHead(true); - if (store.setHead(before, after)) { - head.set(after); - changeDispatcher.contentChanged(after.getChildNode(ROOT), info); - refreshHead(true); - return true; - } else { - return false; - } - } - - private SegmentNodeBuilder prepare(SegmentNodeState state) throws CommitFailedException { - SegmentNodeBuilder builder = state.builder(); - if (fastEquals(before, state.getChildNode(ROOT))) { - // use a shortcut when there are no external changes - builder.setChildNode( - ROOT, hook.processCommit(before, after, info)); - } else { - // there were some external changes, so do the full rebase - ConflictAnnotatingRebaseDiff diff = - new ConflictAnnotatingRebaseDiff(builder.child(ROOT)); - after.compareAgainstBaseState(before, diff); - // apply commit hooks on the rebased changes - builder.setChildNode(ROOT, hook.processCommit( - builder.getBaseState().getChildNode(ROOT), - builder.getNodeState().getChildNode(ROOT), - info)); - } - return builder; - } - - private long optimisticMerge() - throws CommitFailedException, InterruptedException { - long timeout = 1; - - // use exponential backoff in case of concurrent commits - for (long backoff = 1; backoff < maximumBackoff; backoff *= 2) { - long start = System.nanoTime(); - - refreshHead(true); - SegmentNodeState state = head.get(); - if (state.hasProperty("token") - && state.getLong("timeout") >= currentTimeMillis()) { - // someone else has a pessimistic lock on the journal, - // so we should not try to commit anything yet - } else { - SegmentNodeBuilder builder = prepare(state); - // use optimistic locking to update the journal - if (setHead(state, builder.getNodeState())) { - return -1; - } - } - - // someone else was faster, so wait a while and retry later - Thread.sleep(backoff, random.nextInt(1000000)); - - long stop = System.nanoTime(); - if (stop - start > timeout) { - timeout = stop - start; - } - } - - return MILLISECONDS.convert(timeout, NANOSECONDS); - } - - private void pessimisticMerge(long timeout) - throws CommitFailedException, InterruptedException { - while (true) { - long now = 
currentTimeMillis(); - SegmentNodeState state = head.get(); - if (state.hasProperty("token") - && state.getLong("timeout") >= now) { - // locked by someone else, wait until unlocked or expired - Thread.sleep( - Math.min(state.getLong("timeout") - now, 1000), - random.nextInt(1000000)); - } else { - // attempt to acquire the lock - SegmentNodeBuilder builder = state.builder(); - builder.setProperty("token", UUID.randomUUID().toString()); - builder.setProperty("timeout", now + timeout); - - if (setHead(state, builder.getNodeState())) { - // lock acquired; rebase, apply commit hooks, and unlock - builder = prepare(state); - builder.removeProperty("token"); - builder.removeProperty("timeout"); - - // complete the commit - if (setHead(state, builder.getNodeState())) { - return; - } - } - } - } - } - - @Nonnull - NodeState execute() - throws CommitFailedException, InterruptedException { - // only do the merge if there are some changes to commit - if (!fastEquals(before, after)) { - long timeout = optimisticMerge(); - if (timeout >= 0) { - pessimisticMerge(timeout); - } - } - return head.get().getChildNode(ROOT); - } - - } - - /** - * Sets the number of seconds to wait for the attempt to grab the lock to - * create a checkpoint - */ - void setCheckpointsLockWaitTime(int checkpointsLockWaitTime) { - this.checkpointsLockWaitTime = checkpointsLockWaitTime; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactory.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactory.java deleted file mode 100644 index 80d15bc..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactory.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkState; -import static org.apache.jackrabbit.oak.osgi.OsgiUtil.lookupConfigurationThenFramework; -import static org.apache.jackrabbit.oak.spi.blob.osgi.SplitBlobStoreService.ONLY_STANDALONE_TARGET; -import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.registerMBean; - -import java.io.File; -import java.io.IOException; -import java.util.Dictionary; -import java.util.Hashtable; - -import org.apache.felix.scr.annotations.Activate; -import org.apache.felix.scr.annotations.Component; -import org.apache.felix.scr.annotations.ConfigurationPolicy; -import org.apache.felix.scr.annotations.Deactivate; -import org.apache.felix.scr.annotations.Property; -import org.apache.felix.scr.annotations.Reference; -import org.apache.felix.scr.annotations.ReferenceCardinality; -import org.apache.felix.scr.annotations.ReferencePolicy; -import org.apache.jackrabbit.oak.commons.PropertiesUtil; -import org.apache.jackrabbit.oak.osgi.OsgiWhiteboard; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.Builder; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStoreStatsMBean; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.apache.jackrabbit.oak.spi.state.NodeStoreProvider; -import org.apache.jackrabbit.oak.spi.state.ProxyNodeStore; -import org.apache.jackrabbit.oak.spi.whiteboard.Registration; -import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardExecutor; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.osgi.framework.ServiceRegistration; -import org.osgi.service.component.ComponentContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A factory allowing creation of secondary segment node stores. - *

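A hypothetical OSGi factory configuration for one such instance (the file name and all values are illustrative; only the property names come from the constants defined below):

    # org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreFactory-secondary.config
    role="secondary"
    repository.home="/path/to/repository"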
- * The different secondaries are distinguished by their role attribute. - */ -@Component(policy = ConfigurationPolicy.REQUIRE, - name="org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreFactory", - configurationFactory=true, - metatype = true, - label = "Apache Jackrabbit Oak Segment NodeStore Factory", - description = "Factory allowing configuration of adjacent instances of " + - "NodeStore implementation based on Segment model besides a default SegmentNodeStore in same setup." -) -@Deprecated -public class SegmentNodeStoreFactory extends ProxyNodeStore { - - @Deprecated - public static final String NAME = "name"; - - @Property( - label = "Role", - description="As multiple SegmentNodeStores can be configured, this parameter defines the role " + - "of 'this' SegmentNodeStore." - ) - @Deprecated - public static final String ROLE = "role"; - - @Property( - label = "Directory", - description="Directory location used to store the segment tar files. If not specified then looks " + - "for framework property 'repository.home' otherwise use a subdirectory with name 'tarmk'" - ) - @Deprecated - public static final String DIRECTORY = "repository.home"; - - @Property( - label = "Mode", - description="TarMK mode (64 for memory mapping, 32 for normal file access)" - ) - @Deprecated - public static final String MODE = "tarmk.mode"; - - @Property( - intValue = 256, - label = "Maximum Tar File Size (MB)", - description = "TarMK maximum file size (MB)" - ) - @Deprecated - public static final String SIZE = "tarmk.size"; - - @Property( - intValue = 256, - label = "Cache size (MB)", - description = "Cache size for storing most recently used Segments" - ) - @Deprecated - public static final String CACHE = "cache"; - - @Property(boolValue = false, - label = "Custom BlobStore", - description = "Boolean value indicating that a custom BlobStore is to be used. " + - "By default large binary content would be stored within segment tar files" - ) - @Deprecated - public static final String CUSTOM_BLOB_STORE = "customBlobStore"; - - private final Logger log = LoggerFactory.getLogger(getClass()); - - private String name; - - private FileStore store; - - private volatile SegmentNodeStore segmentNodeStore; - - private ComponentContext context; - - @Reference(cardinality = ReferenceCardinality.OPTIONAL_UNARY, - policy = ReferencePolicy.DYNAMIC, target = ONLY_STANDALONE_TARGET) - private volatile BlobStore blobStore; - - @Reference - private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP; - - private ServiceRegistration storeRegistration; - private Registration fileStoreStatsMBean; - private WhiteboardExecutor executor; - - private boolean customBlobStore; - - private String role; - - @Override - protected SegmentNodeStore getNodeStore() { - checkState(segmentNodeStore != null, "service must be activated when used"); - return segmentNodeStore; - } - - @Activate - @Deprecated - public void activate(ComponentContext context) throws IOException { - this.context = context; - this.name = PropertiesUtil.toString(context.getProperties().get(NAME), "SegmentNodeStore instance"); - this.role = property(ROLE); - //In secondaryNodeStore mode customBlobStore is always enabled - this.customBlobStore = Boolean.parseBoolean(property(CUSTOM_BLOB_STORE)) || isSecondaryStoreMode(); - log.info("activate: SegmentNodeStore '"+role+"' starting."); - - if (blobStore == null && customBlobStore) { - log.info("BlobStore use enabled. 
SegmentNodeStore would be initialized when BlobStore would be available"); - } else { - registerNodeStore(); - } - } - - protected void bindBlobStore(BlobStore blobStore) throws IOException { - this.blobStore = blobStore; - registerNodeStore(); - } - - protected void unbindBlobStore(BlobStore blobStore){ - this.blobStore = null; - unregisterNodeStore(); - } - - @Deactivate - @Deprecated - public void deactivate() { - unregisterNodeStore(); - - synchronized (this) { - segmentNodeStore = null; - - if (store != null) { - store.close(); - store = null; - } - } - } - - private synchronized void registerNodeStore() throws IOException { - if (registerSegmentStore() && role != null) { - registerNodeStoreProvider(); - } - } - - private boolean isSecondaryStoreMode() { - return "secondary".equals(role); - } - - private void registerNodeStoreProvider() { - SegmentNodeStore.SegmentNodeStoreBuilder nodeStoreBuilder = SegmentNodeStore.builder(store); - segmentNodeStore = nodeStoreBuilder.build(); - Dictionary props = new Hashtable(); - props.put(NodeStoreProvider.ROLE, role); - storeRegistration = context.getBundleContext().registerService(NodeStoreProvider.class.getName(), new NodeStoreProvider() { - @Override - public NodeStore getNodeStore() { - return SegmentNodeStoreFactory.this; - } - }, - props); - log.info("Registered NodeStoreProvider backed by SegmentNodeStore of type '{}'", role); - } - - private boolean registerSegmentStore() throws IOException { - if (context == null) { - log.info("Component still not activated. Ignoring the initialization call"); - return false; - } - - OsgiWhiteboard whiteboard = new OsgiWhiteboard(context.getBundleContext()); - - // Build the FileStore - - Builder builder = FileStore.builder(getDirectory()) - .withCacheSize(getCacheSize()) - .withMaxFileSize(getMaxFileSize()) - .withMemoryMapping(getMode().equals("64")) - .withStatisticsProvider(statisticsProvider); - - if (customBlobStore) { - log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore); - builder.withBlobStore(blobStore); - } - - try { - store = builder.build(); - } catch (InvalidFileStoreVersionException e) { - log.error("The segment store data is not compatible with the current version. 
Please use oak-segment-tar or a different version of oak-segment."); - return false; - } - - // Listen for Executor services on the whiteboard - - executor = new WhiteboardExecutor(); - executor.start(whiteboard); - - // Expose statistics about the FileStore - - fileStoreStatsMBean = registerMBean( - whiteboard, - FileStoreStatsMBean.class, - store.getStats(), - FileStoreStatsMBean.TYPE, - "FileStore '" + role + "' statistics" - ); - - return true; - } - - private void unregisterNodeStore() { - if (storeRegistration != null) { - storeRegistration.unregister(); - storeRegistration = null; - } - if (fileStoreStatsMBean != null) { - fileStoreStatsMBean.unregister(); - fileStoreStatsMBean = null; - } - if (executor != null) { - executor.stop(); - executor = null; - } - } - - private File getBaseDirectory() { - String directory = property(DIRECTORY); - - if (directory != null) { - return new File(directory); - } - - return new File("tarmk"); - } - - private File getDirectory() { - String dirName = "segmentstore"; - if (role != null){ - dirName = role + "-" + dirName; - } - return new File(getBaseDirectory(), dirName); - } - - private String getMode() { - String mode = property(MODE); - - if (mode != null) { - return mode; - } - - return System.getProperty(MODE, System.getProperty("sun.arch.data.model", "32")); - } - - private String getCacheSizeProperty() { - String cache = property(CACHE); - - if (cache != null) { - return cache; - } - - return System.getProperty(CACHE); - } - - private int getCacheSize() { - return Integer.parseInt(getCacheSizeProperty()); - } - - private String getMaxFileSizeProperty() { - String size = property(SIZE); - - if (size != null) { - return size; - } - - return System.getProperty(SIZE, "256"); - } - - private int getMaxFileSize() { - return Integer.parseInt(getMaxFileSizeProperty()); - } - - private String property(String name) { - return lookupConfigurationThenFramework(context, name); - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public String toString() { - return name + ": " + segmentNodeStore + "[role:" + role + "]"; - } -} \ No newline at end of file diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java deleted file mode 100644 index 8f0f260..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java +++ /dev/null @@ -1,866 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkState; -import static java.util.Collections.emptyMap; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.jackrabbit.oak.commons.PropertiesUtil.toBoolean; -import static org.apache.jackrabbit.oak.commons.PropertiesUtil.toInteger; -import static org.apache.jackrabbit.oak.commons.PropertiesUtil.toLong; -import static org.apache.jackrabbit.oak.osgi.OsgiUtil.lookupConfigurationThenFramework; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CLEANUP_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CLONE_BINARIES_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.FORCE_AFTER_FAIL_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.GAIN_THRESHOLD_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.MEMORY_THRESHOLD_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.PAUSE_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.PERSIST_COMPACTION_MAP_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.RETRY_COUNT_DEFAULT; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.TIMESTAMP_DEFAULT; -import static org.apache.jackrabbit.oak.spi.blob.osgi.SplitBlobStoreService.ONLY_STANDALONE_TARGET; -import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.registerMBean; -import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.scheduleWithFixedDelay; - -import java.io.ByteArrayInputStream; -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.Dictionary; -import java.util.Hashtable; -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; - -import org.apache.felix.scr.annotations.Activate; -import org.apache.felix.scr.annotations.Component; -import org.apache.felix.scr.annotations.ConfigurationPolicy; -import org.apache.felix.scr.annotations.Deactivate; -import org.apache.felix.scr.annotations.Property; -import org.apache.felix.scr.annotations.PropertyOption; -import org.apache.felix.scr.annotations.Reference; -import org.apache.felix.scr.annotations.ReferenceCardinality; -import org.apache.felix.scr.annotations.ReferencePolicy; -import org.apache.jackrabbit.commons.SimpleValueFactory; -import org.apache.jackrabbit.oak.api.Descriptors; -import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean; -import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean; -import org.apache.jackrabbit.oak.cache.CacheStats; -import org.apache.jackrabbit.oak.commons.PropertiesUtil; -import org.apache.jackrabbit.oak.osgi.ObserverTracker; -import org.apache.jackrabbit.oak.osgi.OsgiWhiteboard; -import org.apache.jackrabbit.oak.plugins.blob.BlobGC; -import org.apache.jackrabbit.oak.plugins.blob.BlobGCMBean; -import org.apache.jackrabbit.oak.plugins.blob.BlobGarbageCollector; -import org.apache.jackrabbit.oak.plugins.blob.BlobTrackingStore; -import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector; -import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore; -import org.apache.jackrabbit.oak.plugins.blob.datastore.BlobIdTracker; -import 
org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils; -import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType; -import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategyMBean; -import org.apache.jackrabbit.oak.plugins.segment.compaction.DefaultCompactionStrategyMBean; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.Builder; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStoreGCMonitor; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStoreStatsMBean; -import org.apache.jackrabbit.oak.plugins.segment.file.GCMonitorMBean; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; -import org.apache.jackrabbit.oak.spi.commit.Observable; -import org.apache.jackrabbit.oak.spi.commit.Observer; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.spi.gc.GCMonitorTracker; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.apache.jackrabbit.oak.spi.state.NodeStoreProvider; -import org.apache.jackrabbit.oak.spi.state.ProxyNodeStore; -import org.apache.jackrabbit.oak.spi.state.RevisionGC; -import org.apache.jackrabbit.oak.spi.state.RevisionGCMBean; -import org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration; -import org.apache.jackrabbit.oak.spi.whiteboard.Registration; -import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardExecutor; -import org.apache.jackrabbit.oak.stats.Clock; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.apache.jackrabbit.oak.util.GenericDescriptors; -import org.osgi.framework.Constants; -import org.osgi.framework.ServiceRegistration; -import org.osgi.service.component.ComponentContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * An OSGi wrapper for the segment node store. - */ -@Component(policy = ConfigurationPolicy.REQUIRE, - metatype = true, - label = "Apache Jackrabbit Oak Segment NodeStore Service", - description = "NodeStore implementation based on Segment model. For configuration option refer " + - "to http://jackrabbit.apache.org/oak/docs/osgi_config.html#SegmentNodeStore. Note that for system " + - "stability purpose it is advisable to not change these settings at runtime. Instead the config change " + - "should be done via file system based config file and this view should ONLY be used to determine which " + - "options are supported" -) -@Deprecated -public class SegmentNodeStoreService extends ProxyNodeStore - implements Observable, SegmentStoreProvider { - - @Deprecated - public static final String NAME = "name"; - - @Property( - label = "Directory", - description="Directory location used to store the segment tar files. 
If not specified then looks " + - "for framework property 'repository.home' otherwise use a subdirectory with name 'tarmk'" - ) - @Deprecated - public static final String DIRECTORY = "repository.home"; - - @Property( - label = "Mode", - description="TarMK mode (64 for memory mapping, 32 for normal file access)" - ) - @Deprecated - public static final String MODE = "tarmk.mode"; - - @Property( - intValue = 256, - label = "Maximum Tar File Size (MB)", - description = "TarMK maximum file size (MB)" - ) - @Deprecated - public static final String SIZE = "tarmk.size"; - - @Property( - intValue = 256, - label = "Cache size (MB)", - description = "Cache size for storing most recently used Segments" - ) - @Deprecated - public static final String CACHE = "cache"; - - @Property( - boolValue = CLONE_BINARIES_DEFAULT, - label = "Clone Binaries", - description = "Clone the binary segments while performing compaction" - ) - @Deprecated - public static final String COMPACTION_CLONE_BINARIES = "compaction.cloneBinaries"; - - @Property(options = { - @PropertyOption(name = "CLEAN_ALL", value = "CLEAN_ALL"), - @PropertyOption(name = "CLEAN_NONE", value = "CLEAN_NONE"), - @PropertyOption(name = "CLEAN_OLD", value = "CLEAN_OLD") }, - value = "CLEAN_OLD", - label = "Cleanup Strategy", - description = "Cleanup strategy used for live in memory segment references while performing cleanup. "+ - "1. CLEAN_NONE: All in memory references are considered valid, " + - "2. CLEAN_OLD: Only in memory references older than a " + - "certain age are considered valid (compaction.cleanup.timestamp), " + - "3. CLEAN_ALL: None of the in memory references are considered valid" - ) - @Deprecated - public static final String COMPACTION_CLEANUP = "compaction.cleanup"; - - @Property( - longValue = TIMESTAMP_DEFAULT, - label = "Reference expiry time (ms)", - description = "Time interval in ms beyond which in memory segment references would be ignored " + - "while performing cleanup" - ) - @Deprecated - public static final String COMPACTION_CLEANUP_TIMESTAMP = "compaction.cleanup.timestamp"; - - @Property( - byteValue = MEMORY_THRESHOLD_DEFAULT, - label = "Memory Multiplier", - description = "TarMK compaction available memory multiplier needed to run compaction" - ) - @Deprecated - public static final String COMPACTION_MEMORY_THRESHOLD = "compaction.memoryThreshold"; - - @Property( - byteValue = GAIN_THRESHOLD_DEFAULT, - label = "Compaction gain threshold", - description = "TarMK compaction gain threshold. The gain estimation prevents compaction from running " + - "if the provided threshold is not met. Value represents a percentage so an input between 0 and 100 is expected." - ) - @Deprecated - public static final String COMPACTION_GAIN_THRESHOLD = "compaction.gainThreshold"; - - @Property( - boolValue = PAUSE_DEFAULT, - label = "Pause Compaction", - description = "When enabled compaction would not be performed" - ) - @Deprecated - public static final String PAUSE_COMPACTION = "pauseCompaction"; - - @Property( - intValue = RETRY_COUNT_DEFAULT, - label = "Compaction Retries", - description = "Number of tries to compact concurrent commits on top of already " + - "compacted commits" - ) - @Deprecated - public static final String COMPACTION_RETRY_COUNT = "compaction.retryCount"; - - @Property( - boolValue = FORCE_AFTER_FAIL_DEFAULT, - label = "Force Compaction", - description = "Whether or not to force compact concurrent commits on top of already " + - " compacted commits after the maximum number of retries has been reached.
" + - "Force committing tries to exclusively write lock the node store." - ) - @Deprecated - public static final String COMPACTION_FORCE_AFTER_FAIL = "compaction.forceAfterFail"; - - @Deprecated - public static final int COMPACTION_LOCK_WAIT_TIME_DEFAULT = 60; - @Property( - intValue = COMPACTION_LOCK_WAIT_TIME_DEFAULT, - label = "Compaction Lock Wait Time", - description = "Number of seconds to wait for the lock for committing compacted changes " + - "respectively to wait for the exclusive write lock for force committing." - ) - @Deprecated - public static final String COMPACTION_LOCK_WAIT_TIME = "compaction.lockWaitTime"; - - @Property( - boolValue = PERSIST_COMPACTION_MAP_DEFAULT, - label = "Persist Compaction Map", - description = "When enabled the compaction map would be persisted instead of being " + - "held in memory" - ) - @Deprecated - public static final String PERSIST_COMPACTION_MAP = "persistCompactionMap"; - - @Property( - boolValue = false, - label = "Standby Mode", - description = "Flag indicating that this component will not register as a NodeStore but just as a NodeStoreProvider" - ) - @Deprecated - public static final String STANDBY = "standby"; - - @Property( - boolValue = false, - label = "Secondary Store Mode", - description = "Flag indicating that this component will not register as a NodeStore but just as a SecondaryNodeStoreProvider" - ) - @Deprecated - public static final String SECONDARY_STORE = "secondary"; - - @Property(boolValue = false, - label = "Custom BlobStore", - description = "Boolean value indicating that a custom BlobStore is to be used. " + - "By default large binary content would be stored within segment tar files" - ) - @Deprecated - public static final String CUSTOM_BLOB_STORE = "customBlobStore"; - - private final Logger log = LoggerFactory.getLogger(getClass()); - - private String name; - - private FileStore store; - - private volatile SegmentNodeStore segmentNodeStore; - - private ObserverTracker observerTracker; - - private GCMonitorTracker gcMonitor; - - private ComponentContext context; - - private CompactionStrategy compactionStrategy; - - @Reference(cardinality = ReferenceCardinality.OPTIONAL_UNARY, - policy = ReferencePolicy.DYNAMIC, target = ONLY_STANDALONE_TARGET) - private volatile BlobStore blobStore; - - @Reference - private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP; - - private ServiceRegistration storeRegistration; - private ServiceRegistration providerRegistration; - private Registration checkpointRegistration; - private Registration revisionGCRegistration; - private Registration blobGCRegistration; - private Registration compactionStrategyRegistration; - private Registration segmentCacheMBean; - private Registration stringCacheMBean; - private Registration fsgcMonitorMBean; - private Registration fileStoreStatsMBean; - private WhiteboardExecutor executor; - private boolean customBlobStore; - - private Registration discoveryLiteDescriptorRegistration; - - private Registration clusterIdDescriptorRegistration; - - /** - * Blob modified before this time duration would be considered for Blob GC - */ - private static final long DEFAULT_BLOB_GC_MAX_AGE = 24 * 60 * 60; - @Property (longValue = DEFAULT_BLOB_GC_MAX_AGE, - label = "Blob GC Max Age (in secs)", - description = "Blob Garbage Collector (GC) logic will only consider those blobs for GC which " + - "are not accessed recently (currentTime - lastModifiedTime > blobGcMaxAgeInSecs). 
For " + - "example as per default only those blobs which have been created 24 hrs ago will be " + - "considered for GC" - ) - @Deprecated - public static final String PROP_BLOB_GC_MAX_AGE = "blobGcMaxAgeInSecs"; - - /** - * Default interval for taking snapshots of locally tracked blob ids. - */ - private static final long DEFAULT_BLOB_SNAPSHOT_INTERVAL = 12 * 60 * 60; - @Property (longValue = DEFAULT_BLOB_SNAPSHOT_INTERVAL, - label = "Blob tracking snapshot interval (in secs)", - description = "This is the default interval in which the snapshots of locally tracked blob ids will " - + "be taken and synchronized with the blob store. This should be configured to be less than the " - + "frequency of blob GC so that deletions during blob GC can be accounted for " - + "in the next GC execution." - ) - @Deprecated - public static final String PROP_BLOB_SNAPSHOT_INTERVAL = "blobTrackSnapshotIntervalInSecs"; - - @Override - protected SegmentNodeStore getNodeStore() { - checkState(segmentNodeStore != null, "service must be activated when used"); - return segmentNodeStore; - } - - @Activate - @Deprecated - public void activate(ComponentContext context) throws IOException { - this.context = context; - //In secondaryNodeStore mode customBlobStore is always enabled - this.customBlobStore = Boolean.parseBoolean(property(CUSTOM_BLOB_STORE)) || isSecondaryStoreMode(); - - if (blobStore == null && customBlobStore) { - log.info("BlobStore use enabled. SegmentNodeStore would be initialized when BlobStore would be available"); - } else { - registerNodeStore(); - } - } - - protected void bindBlobStore(BlobStore blobStore) throws IOException { - this.blobStore = blobStore; - registerNodeStore(); - } - - protected void unbindBlobStore(BlobStore blobStore){ - this.blobStore = null; - unregisterNodeStore(); - } - - @Deactivate - @Deprecated - public void deactivate() { - unregisterNodeStore(); - - synchronized (this) { - if (observerTracker != null) { - observerTracker.stop(); - } - if (gcMonitor != null) { - gcMonitor.stop(); - } - segmentNodeStore = null; - - if (store != null) { - store.close(); - store = null; - } - } - } - - private synchronized void registerNodeStore() throws IOException { - if (registerSegmentStore()) { - if (toBoolean(property(STANDBY), false)) { - return; - } - - if (isSecondaryStoreMode()){ - registerSecondaryStore(); - return; - } - - if (registerSegmentNodeStore()) { - Dictionary props = new Hashtable(); - props.put(Constants.SERVICE_PID, SegmentNodeStore.class.getName()); - props.put("oak.nodestore.description", new String[]{"nodeStoreType=segment"}); - storeRegistration = context.getBundleContext().registerService(NodeStore.class.getName(), this, props); - } - } - } - - private boolean isSecondaryStoreMode() { - return toBoolean(property(SECONDARY_STORE), false); - } - - private void registerSecondaryStore() { - SegmentNodeStore.SegmentNodeStoreBuilder nodeStoreBuilder = SegmentNodeStore.builder(store); - nodeStoreBuilder.withCompactionStrategy(compactionStrategy); - segmentNodeStore = nodeStoreBuilder.build(); - Dictionary props = new Hashtable(); - props.put(NodeStoreProvider.ROLE, "secondary"); - storeRegistration = context.getBundleContext().registerService(NodeStoreProvider.class.getName(), new NodeStoreProvider() { - @Override - public NodeStore getNodeStore() { - return SegmentNodeStoreService.this; - } - }, - props); - log.info("Registered NodeStoreProvider backed by SegmentNodeStore"); - } - - private boolean registerSegmentStore() throws IOException { - if (context == null) {
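// Not yet activated: registerNodeStore() can be re-entered from
// bindBlobStore() before activate() has supplied the ComponentContext,
// and without it there is no BundleContext to register services with,
// so bail out and wait for activate() to trigger registration again.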
- log.info("Component still not activated. Ignoring the initialization call"); - return false; - } - - OsgiWhiteboard whiteboard = new OsgiWhiteboard(context.getBundleContext()); - - // Listen for GCMonitor services - - gcMonitor = new GCMonitorTracker(); - gcMonitor.start(whiteboard); - - // Build the FileStore - - Builder builder = FileStore.builder(getDirectory()) - .withCacheSize(getCacheSize()) - .withMaxFileSize(getMaxFileSize()) - .withMemoryMapping(getMode().equals("64")) - .withGCMonitor(gcMonitor) - .withStatisticsProvider(statisticsProvider); - - if (customBlobStore) { - log.info("Initializing SegmentNodeStore with BlobStore [{}]", blobStore); - builder.withBlobStore(blobStore); - } - - try { - store = builder.build(); - } catch (InvalidFileStoreVersionException e) { - log.error("The segment store data is not compatible with the current version. Please use oak-segment-tar or a different version of oak-segment."); - return false; - } - - // Create a compaction strategy - - compactionStrategy = newCompactionStrategy(); - - // Expose an MBean to provide information about the compaction strategy - - compactionStrategyRegistration = registerMBean( - whiteboard, - CompactionStrategyMBean.class, - new DefaultCompactionStrategyMBean(compactionStrategy), - CompactionStrategyMBean.TYPE, - "Segment node store compaction strategy settings" - ); - - // Let the FileStore be aware of the compaction strategy - - store.setCompactionStrategy(compactionStrategy); - - // Expose stats about the segment cache - - CacheStats segmentCacheStats = store.getTracker().getSegmentCacheStats(); - - segmentCacheMBean = registerMBean( - whiteboard, - CacheStatsMBean.class, - segmentCacheStats, - CacheStats.TYPE, - segmentCacheStats.getName() - ); - - // Expose stats about the string cache, if available - - CacheStats stringCacheStats = store.getTracker().getStringCacheStats(); - - if (stringCacheStats != null) { - stringCacheMBean = registerMBean( - whiteboard, - CacheStatsMBean.class, - stringCacheStats,CacheStats.TYPE, - stringCacheStats.getName() - ); - } - - // Listen for Executor services on the whiteboard - - executor = new WhiteboardExecutor(); - executor.start(whiteboard); - - // Expose an MBean to trigger garbage collection - - Runnable triggerGarbageCollection = new Runnable() { - - @Override - public void run() { - store.gc(); - } - - }; - - Runnable cancelGarbageCollection = new Runnable() { - @Override - public void run() { - throw new UnsupportedOperationException("Cancelling revision garbage collection is not supported"); - } - }; - revisionGCRegistration = registerMBean( - whiteboard, - RevisionGCMBean.class, - new RevisionGC(triggerGarbageCollection, cancelGarbageCollection, executor), - RevisionGCMBean.TYPE, - "Segment node store revision garbage collection" - ); - - // Expose statistics about the FileStore - - fileStoreStatsMBean = registerMBean( - whiteboard, - FileStoreStatsMBean.class, - store.getStats(), - FileStoreStatsMBean.TYPE, - "FileStore statistics" - ); - - // Register a monitor for the garbage collection of the FileStore - - FileStoreGCMonitor fsgcm = new FileStoreGCMonitor(Clock.SIMPLE); - - fsgcMonitorMBean = new CompositeRegistration( - whiteboard.register(GCMonitor.class, fsgcm, emptyMap()), - registerMBean( - whiteboard, - GCMonitorMBean.class, - fsgcm, - GCMonitorMBean.TYPE, - "File Store garbage collection monitor" - ), - scheduleWithFixedDelay(whiteboard, fsgcm, 1) - ); - - // Register a factory service to expose the FileStore - - providerRegistration = 
context.getBundleContext().registerService(SegmentStoreProvider.class.getName(), this, null); - - return true; - } - - private CompactionStrategy newCompactionStrategy() { - boolean pauseCompaction = toBoolean(property(PAUSE_COMPACTION), PAUSE_DEFAULT); - boolean cloneBinaries = toBoolean(property(COMPACTION_CLONE_BINARIES), CLONE_BINARIES_DEFAULT); - long cleanupTs = toLong(property(COMPACTION_CLEANUP_TIMESTAMP), TIMESTAMP_DEFAULT); - int retryCount = toInteger(property(COMPACTION_RETRY_COUNT), RETRY_COUNT_DEFAULT); - boolean forceAfterFail = toBoolean(property(COMPACTION_FORCE_AFTER_FAIL), FORCE_AFTER_FAIL_DEFAULT); - final int lockWaitTime = toInteger(property(COMPACTION_LOCK_WAIT_TIME), COMPACTION_LOCK_WAIT_TIME_DEFAULT); - boolean persistCompactionMap = toBoolean(property(PERSIST_COMPACTION_MAP), PERSIST_COMPACTION_MAP_DEFAULT); - - CleanupType cleanupType = getCleanUpType(); - byte memoryThreshold = getMemoryThreshold(); - byte gainThreshold = getGainThreshold(); - - // This is indeed a dirty hack, but it's needed to break a circular - // dependency between different components. The FileStore needs the - // CompactionStrategy, the CompactionStrategy needs the - // SegmentNodeStore, and the SegmentNodeStore needs the FileStore. - - CompactionStrategy compactionStrategy = new CompactionStrategy(pauseCompaction, cloneBinaries, cleanupType, cleanupTs, memoryThreshold) { - - @Override - public boolean compacted(Callable setHead) throws Exception { - // Need to guard against concurrent commits to avoid - // mixed segments. See OAK-2192. - return segmentNodeStore.locked(setHead, lockWaitTime, SECONDS); - } - - }; - - compactionStrategy.setRetryCount(retryCount); - compactionStrategy.setForceAfterFail(forceAfterFail); - compactionStrategy.setPersistCompactionMap(persistCompactionMap); - compactionStrategy.setGainThreshold(gainThreshold); - - return compactionStrategy; - } - - private boolean registerSegmentNodeStore() throws IOException { - Dictionary properties = context.getProperties(); - name = String.valueOf(properties.get(NAME)); - - final long blobGcMaxAgeInSecs = toLong(property(PROP_BLOB_GC_MAX_AGE), DEFAULT_BLOB_GC_MAX_AGE); - - OsgiWhiteboard whiteboard = new OsgiWhiteboard(context.getBundleContext()); - - SegmentNodeStore.SegmentNodeStoreBuilder nodeStoreBuilder = SegmentNodeStore.builder(store); - nodeStoreBuilder.withCompactionStrategy(compactionStrategy); - segmentNodeStore = nodeStoreBuilder.build(); - - observerTracker = new ObserverTracker(segmentNodeStore); - observerTracker.start(context.getBundleContext()); - - checkpointRegistration = registerMBean(whiteboard, CheckpointMBean.class, new SegmentCheckpointMBean(segmentNodeStore), - CheckpointMBean.TYPE, "Segment node store checkpoint management"); - - // ensure a clusterId is initialized - // and expose it as 'oak.clusterid' repository descriptor - GenericDescriptors clusterIdDesc = new GenericDescriptors(); - clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, - new SimpleValueFactory().createValue( - ClusterRepositoryInfo.getOrCreateId(segmentNodeStore)), true, false); - clusterIdDescriptorRegistration = whiteboard.register( - Descriptors.class, - clusterIdDesc, - Collections.emptyMap() - ); - - // Register "discovery lite" descriptors - discoveryLiteDescriptorRegistration = whiteboard.register( - Descriptors.class, - new SegmentDiscoveryLiteDescriptors(segmentNodeStore), - Collections.emptyMap() - ); - - // If a shared data store register the repo id in the data store - String repoId = ""; - 
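// When the BlobStore is shared across repositories, record this
// repository's id in the DataStore and attach a blob id tracker, so
// that the blob garbage collector created below can attribute blob
// references to this repository.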
if (SharedDataStoreUtils.isShared(blobStore)) { - try { - repoId = ClusterRepositoryInfo.getOrCreateId(segmentNodeStore); - ((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]), - SharedStoreRecordType.REPOSITORY.getNameFromId(repoId)); - } catch (Exception e) { - throw new IOException("Could not register a unique repositoryId", e); - } - - if (blobStore instanceof BlobTrackingStore) { - final long trackSnapshotInterval = toLong(property(PROP_BLOB_SNAPSHOT_INTERVAL), - DEFAULT_BLOB_SNAPSHOT_INTERVAL); - String root = PropertiesUtil.toString(property(DIRECTORY), "./repository"); - - BlobTrackingStore trackingStore = (BlobTrackingStore) blobStore; - if (trackingStore.getTracker() != null) { - trackingStore.getTracker().close(); - } - ((BlobTrackingStore) blobStore).addTracker( - new BlobIdTracker(root, repoId, trackSnapshotInterval, (SharedDataStore) - blobStore)); - } - } - - if (store.getBlobStore() instanceof GarbageCollectableBlobStore) { - BlobGarbageCollector gc = new MarkSweepGarbageCollector( - new SegmentBlobReferenceRetriever(store.getTracker()), - (GarbageCollectableBlobStore) store.getBlobStore(), - executor, - TimeUnit.SECONDS.toMillis(blobGcMaxAgeInSecs), - repoId - ); - - blobGCRegistration = registerMBean( - whiteboard, - BlobGCMBean.class, - new BlobGC(gc, executor), - BlobGCMBean.TYPE, - "Segment node store blob garbage collection" - ); - } - - log.info("SegmentNodeStore initialized"); - return true; - } - - private void unregisterNodeStore() { - if (discoveryLiteDescriptorRegistration != null) { - discoveryLiteDescriptorRegistration.unregister(); - discoveryLiteDescriptorRegistration = null; - } - if (clusterIdDescriptorRegistration != null) { - clusterIdDescriptorRegistration.unregister(); - clusterIdDescriptorRegistration = null; - } - if (segmentCacheMBean != null) { - segmentCacheMBean.unregister(); - segmentCacheMBean = null; - } - if (stringCacheMBean != null) { - stringCacheMBean.unregister(); - stringCacheMBean = null; - } - if (providerRegistration != null) { - providerRegistration.unregister(); - providerRegistration = null; - } - if (storeRegistration != null) { - storeRegistration.unregister(); - storeRegistration = null; - } - if (checkpointRegistration != null) { - checkpointRegistration.unregister(); - checkpointRegistration = null; - } - if (revisionGCRegistration != null) { - revisionGCRegistration.unregister(); - revisionGCRegistration = null; - } - if (blobGCRegistration != null) { - blobGCRegistration.unregister(); - blobGCRegistration = null; - } - if (compactionStrategyRegistration != null) { - compactionStrategyRegistration.unregister(); - compactionStrategyRegistration = null; - } - if (fsgcMonitorMBean != null) { - fsgcMonitorMBean.unregister(); - fsgcMonitorMBean = null; - } - if (fileStoreStatsMBean != null) { - fileStoreStatsMBean.unregister(); - fileStoreStatsMBean = null; - } - if (executor != null) { - executor.stop(); - executor = null; - } - } - - private File getBaseDirectory() { - String directory = property(DIRECTORY); - - if (directory != null) { - return new File(directory); - } - - return new File("tarmk"); - } - - private File getDirectory() { - return new File(getBaseDirectory(), "segmentstore"); - } - - private String getMode() { - String mode = property(MODE); - - if (mode != null) { - return mode; - } - - return System.getProperty(MODE, System.getProperty("sun.arch.data.model", "32")); - } - - private String getCacheSizeProperty() { - String cache = property(CACHE); - - if (cache != null) { - 
return cache; - } - - return System.getProperty(CACHE); - } - - private int getCacheSize() { - return Integer.parseInt(getCacheSizeProperty()); - } - - private String getMaxFileSizeProperty() { - String size = property(SIZE); - - if (size != null) { - return size; - } - - return System.getProperty(SIZE, "256"); - } - - private int getMaxFileSize() { - return Integer.parseInt(getMaxFileSizeProperty()); - } - - private CleanupType getCleanUpType() { - String cleanupType = property(COMPACTION_CLEANUP); - - if (cleanupType == null) { - return CLEANUP_DEFAULT; - } - - return CleanupType.valueOf(cleanupType); - } - - private byte getMemoryThreshold() { - String mt = property(COMPACTION_MEMORY_THRESHOLD); - - if (mt == null) { - return MEMORY_THRESHOLD_DEFAULT; - } - - return Byte.valueOf(mt); - } - - private byte getGainThreshold() { - String gt = property(COMPACTION_GAIN_THRESHOLD); - - if (gt == null) { - return GAIN_THRESHOLD_DEFAULT; - } - - return Byte.valueOf(gt); - } - - private String property(String name) { - return lookupConfigurationThenFramework(context, name); - } - - /** - * needed for situations where you have to unwrap the - * SegmentNodeStoreService, to get the SegmentStore, like the failover - */ - @Override - @Deprecated - public SegmentStore getSegmentStore() { - return store; - } - - //------------------------------------------------------------< Observable >--- - - @Override - @Deprecated - public Closeable addObserver(Observer observer) { - return getNodeStore().addObserver(observer); - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public String toString() { - return name + ": " + segmentNodeStore; - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNotFoundException.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNotFoundException.java deleted file mode 100644 index 6531273..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNotFoundException.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import org.apache.jackrabbit.oak.api.IllegalRepositoryStateException; - -/** - * This exception is thrown when there the segment does not exist in the store - */ -@Deprecated -public class SegmentNotFoundException extends IllegalRepositoryStateException { - - private final String segmentId; - - @Deprecated - public SegmentNotFoundException(SegmentId id) { - super("Segment " + id + " not found"); - this.segmentId = id.toString(); - } - - @Deprecated - public SegmentNotFoundException(SegmentId id, Throwable e) { - super("Segment " + id + " not found", e); - this.segmentId = id.toString(); - } - - @Deprecated - public String getSegmentId() { - return segmentId; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowException.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowException.java deleted file mode 100644 index 6adbb4f..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -/** - * This exception is thrown by the Segment NodeStore when an internal - * limit is exceeded such as too many segment references. Clients should - * only ever see this exception as the cause of a - * {@link org.apache.jackrabbit.oak.api.CommitFailedException CommitFailedException}. - */ -@Deprecated -public class SegmentOverflowException extends RuntimeException { - - @Deprecated - public SegmentOverflowException(String message) { - super(message); - } - - @Deprecated - public SegmentOverflowException(String message, Throwable cause) { - super(message, cause); - } - - @Deprecated - public SegmentOverflowException(Throwable cause) { - super(cause); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParser.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParser.java deleted file mode 100644 index 7a7382c..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParser.java +++ /dev/null @@ -1,882 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static java.util.Collections.singletonList; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.plugins.segment.ListRecord.LEVEL_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MEDIUM_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ID_BYTES; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentWriter.BLOCK_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Template.MANY_CHILD_NODES; -import static org.apache.jackrabbit.oak.plugins.segment.Template.ZERO_CHILD_NODES; - -import java.util.List; - -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; - -/** - * {@code SegmentParser} serves as a base class for parsing segments. - *

<p>
- * This base class provides means for parsing segments into their various
- * kinds of record. Descendants typically parametrise its behaviour by
- * overriding the {@code on...()} methods as needed. By default those
- * methods just initiate the traversal of the same named record.
- * <p>
- * A typical usage, e.g. for printing out the sizes of all templates,
- * would look as follows:
- * <pre>
- * new TestParser() {
- *     protected void onTemplate(RecordId parentId, RecordId templateId) {
- *         TemplateInfo templateInfo = parseTemplate(templateId);
- *         System.out.println(templateInfo.size);
- *     }
- * }.parseNode(nodeId);
- * </pre>
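Fleshed out against the API being deleted here, the javadoc snippet above corresponds to a subclass roughly like the following (TemplateSizeParser and its field are illustrative):

    import org.apache.jackrabbit.oak.plugins.segment.RecordId;
    import org.apache.jackrabbit.oak.plugins.segment.SegmentParser;

    // Illustrative subclass: sums the byte size of every template encountered
    // while traversing from a node record.
    class TemplateSizeParser extends SegmentParser {

        long totalTemplateBytes = 0;

        @Override
        protected void onTemplate(RecordId parentId, RecordId templateId) {
            totalTemplateBytes += parseTemplate(templateId).size;
        }
    }

    // usage, given some node record id "nodeId":
    //     TemplateSizeParser p = new TemplateSizeParser();
    //     p.parseNode(nodeId);
    //     System.out.println(p.totalTemplateBytes);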
- */ -@Deprecated -public class SegmentParser { - - /** - * Type of blobs (and strings) - */ - @Deprecated - public enum BlobType { - /** Small: < {@link Segment#SMALL_LIMIT}. */ - @Deprecated - SMALL, - - /** Medium: < {@link Segment#MEDIUM_LIMIT} */ - @Deprecated - MEDIUM, - - /** Long: >= {@link Segment#MEDIUM_LIMIT} */ - @Deprecated - LONG, - - /** External blob (i.e. in {@link BlobStore}. */ - @Deprecated - EXTERNAL - } - - /** - * Result type of {@link #parseNode(RecordId)}. - */ - @Deprecated - public static class NodeInfo { - /** Id of this record*/ - @Deprecated - public final RecordId nodeId; - - /** Number of child nodes */ - @Deprecated - public final int nodeCount; - - /** Number of properties */ - @Deprecated - public final int propertyCount; - - /** Size in bytes of this node */ - @Deprecated - public final int size; - - @Deprecated - public NodeInfo(RecordId nodeId, int nodeCount, int propertyCount, int size) { - this.nodeId = nodeId; - this.nodeCount = nodeCount; - this.propertyCount = propertyCount; - this.size = size; - } - } - - /** - * Result type of {@link #parseTemplate(RecordId)}. - */ - @Deprecated - public static class TemplateInfo { - /** Id of this record */ - @Deprecated - public final RecordId templateId; - - /** Nodes of this type have a primary type */ - @Deprecated - public final boolean hasPrimaryType; - - /** Nodes of this type have mixins */ - @Deprecated - public final boolean hasMixinType; - - /** Nodes with this type have no child nodes */ - @Deprecated - public final boolean zeroChildNodes; - - /** Nodes of this type have more than one child node */ - @Deprecated - public final boolean manyChildNodes; - - /** Number of mixins */ - @Deprecated - public final int mixinCount; - - /** Number of properties */ - @Deprecated - public final int propertyCount; - - /** Size in bytes of this template */ - @Deprecated - public final int size; - - @Deprecated - public TemplateInfo(RecordId templateId, boolean hasPrimaryType, boolean hasMixinType, - boolean zeroChildNodes, boolean manyChildNodes, int mixinCount, int propertyCount, int size) { - this.templateId = templateId; - this.hasPrimaryType = hasPrimaryType; - this.hasMixinType = hasMixinType; - this.zeroChildNodes = zeroChildNodes; - this.manyChildNodes = manyChildNodes; - this.mixinCount = mixinCount; - this.propertyCount = propertyCount; - this.size = size; - } - } - - /** - * Result type of {@link #parseMap(RecordId, RecordId, MapRecord)}. - */ - @Deprecated - public static class MapInfo { - /** Id of this record */ - @Deprecated - public final RecordId mapId; - - /** Size in bytes of this map. {@code -1} if not known. */ - @Deprecated - public final int size; - - @Deprecated - public MapInfo(RecordId mapId, int size) { - this.mapId = mapId; - this.size = size; - } - } - - /** - * Result type of {@link #parseProperty(RecordId, RecordId, PropertyTemplate)}. - */ - @Deprecated - public static class PropertyInfo { - /** Id of this record */ - @Deprecated - public final RecordId propertyId; - - /** Number of values in properties of this type. {@code -1} for single value properties. */ - @Deprecated - public final int count; - - /** Size in bytes of this property */ - @Deprecated - public final int size; - - @Deprecated - public PropertyInfo(RecordId propertyId, int count, int size) { - this.propertyId = propertyId; - this.count = count; - this.size = size; - } - } - - /** Result type of {@link #parseValue(RecordId, RecordId, Type)}. 
*/ - @Deprecated - public static class ValueInfo { - /** Id of this record */ - @Deprecated - public final RecordId valueId; - - /** Type of this value */ - @Deprecated - public final Type type; - - @Deprecated - public ValueInfo(RecordId valueId, Type type) { - this.valueId = valueId; - this.type = type; - } - } - - /** Return type of {@link #parseBlob(RecordId)}. */ - @Deprecated - public static class BlobInfo { - /** Id of this record */ - @Deprecated - public final RecordId blobId; - - /** Type of this blob */ - @Deprecated - public final BlobType blobType; - - /** Size in bytes of this blob */ - @Deprecated - public final int size; - - @Deprecated - public BlobInfo(RecordId blobId, BlobType blobType, int size) { - this.blobId = blobId; - this.blobType = blobType; - this.size = size; - } - } - - /** Return type of {@link #parseList(RecordId, RecordId, int)} . */ - @Deprecated - public static class ListInfo { - /** Id of this record */ - @Deprecated - public final RecordId listId; - - /** Number of items in this list */ - @Deprecated - public final int count; - - /** Size in bytes of this list */ - @Deprecated - public final int size; - - @Deprecated - public ListInfo(RecordId listId, int count, int size) { - this.listId = listId; - this.count = count; - this.size = size; - } - } - - /** Return type of {@link #parseListBucket(RecordId, int, int, int)}. */ - @Deprecated - public static class ListBucketInfo { - /** Id of this record */ - @Deprecated - public final RecordId listId; - - /** {@code true} if this is a leaf bucket, {@code false} otherwise. */ - @Deprecated - public final boolean leaf; - - /** Entries of this bucket */ - @Deprecated - public final List entries; - - /** Size in bytes of this bucket. */ - @Deprecated - public final int size; - - @Deprecated - public ListBucketInfo(RecordId listId, boolean leaf, List entries, int size) { - this.listId = listId; - this.leaf = leaf; - this.entries = entries; - this.size = size; - } - } - - /** - * Callback called by {@link #parseNode(RecordId)} upon encountering - * a child node. - * - * @param parentId id of the parent node - * @param nodeId if of the child node - */ - @Deprecated - protected void onNode(RecordId parentId, RecordId nodeId) { - parseNode(nodeId); - } - - /** - * Callback called by {@link #parseNode(RecordId)} upon encountering - * a template - * - * @param parentId id of the node being parsed - * @param templateId id of the template - */ - @Deprecated - protected void onTemplate(RecordId parentId, RecordId templateId) { - parseTemplate(templateId); - } - - /** - * Callback called by {@link #parseNode(RecordId)}, - * {@link #parseMapDiff(RecordId, MapRecord)} and - * {@link #parseMapBranch(RecordId, MapRecord)} upon encountering a map. - * - * @param parentId the id of the parent of the map - * @param mapId the id of the map - * @param map the map - */ - @Deprecated - protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { - parseMap(parentId, mapId, map); - } - - /** - * Callback called by {@link #parseMap(RecordId, RecordId, MapRecord)} upon encountering - * a map diff. - * - * @param parentId the id of the parent map - * @param mapId the id of the map - * @param map the map - */ - @Deprecated - protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - parseMapDiff(mapId, map); - } - - /** - * Callback called by {@link #parseMap(RecordId, RecordId, MapRecord)} upon encountering - * a map leaf. 
- * - * @param parentId the id of the parent map - * @param mapId the id of the map - * @param map the map - */ - @Deprecated - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - parseMapLeaf(mapId, map); - } - - /** - * Callback called by {@link #parseMap(RecordId, RecordId, MapRecord)} upon encountering - * a map branch. - * - * @param parentId the id of the parent map - * @param mapId the id of the map - * @param map the map - */ - @Deprecated - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - parseMapBranch(mapId, map); - } - - /** - * Callback called by {@link #parseNode(RecordId)} upon encountering - * a property. - * - * @param parentId the id of the parent node - * @param propertyId the id of the property - * @param template the property template - */ - @Deprecated - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - parseProperty(parentId, propertyId, template); - } - - /** - * Callback called by {@link #parseProperty(RecordId, RecordId, PropertyTemplate)} upon - * encountering a value. - * - * @param parentId the id the value's parent - * @param valueId the id of the value - * @param type the type of the value - */ - @Deprecated - protected void onValue(RecordId parentId, RecordId valueId, Type type) { - parseValue(parentId, valueId, type); - } - - /** - * Callback called by {@link #parseValue(RecordId, RecordId, Type)} upon encountering a blob. - * - * @param parentId the id of the blob's parent - * @param blobId the id of the blob - */ - @Deprecated - protected void onBlob(RecordId parentId, RecordId blobId) { - parseBlob(blobId); - } - - /** - * Callback called by {@link #parseTemplate(RecordId)}, - * {@link #parseMapLeaf(RecordId, MapRecord)} and - * {@link #parseValue(RecordId, RecordId, Type)} upon encountering a string. - * - * @param parentId the id of the string's parent - * @param stringId the id of the string - */ - @Deprecated - protected void onString(RecordId parentId, RecordId stringId) { - parseString(stringId); - } - - /** - * Callback called by {@link #parseNode(RecordId)}, - * {@link #parseProperty(RecordId, RecordId, PropertyTemplate)}, - * {@link #parseTemplate(RecordId)}, - * {@link #parseBlob(RecordId)} and - * {@link #parseString(RecordId)} upon encountering a list. - * - * @param parentId the id of the list's parent - * @param listId the id of the list - * @param count the number of elements in the list - */ - @Deprecated - protected void onList(RecordId parentId, RecordId listId, int count) { - parseList(parentId, listId, count); - } - - /** - * Callback called by {@link #parseList(RecordId, RecordId, int)} and - * {@link #parseListBucket(RecordId, int, int, int)} upon encountering a list - * bucket. 
- * - * @param parentId the id of the list's parent - * @param listId the id of the list - * @param index the index into the bucket - * @param count the number of items in the bucket - * @param capacity the capacity of the bucket - */ - @Deprecated - protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) { - parseListBucket(listId, index, count, capacity); - } - - /** - * Parse a node record - * @param nodeId - * @return - */ - @Deprecated - public NodeInfo parseNode(RecordId nodeId) { - int size = 0; - int nodeCount = 0; - int propertyCount = 0; - - Segment segment = nodeId.getSegment(); - int offset = nodeId.getOffset(); - RecordId templateId = segment.readRecordId(offset); - onTemplate(nodeId, templateId); - - Template template = segment.readTemplate(templateId); - - // Recurses into child nodes in this segment - if (template.getChildName() == MANY_CHILD_NODES) { - RecordId childMapId = segment.readRecordId(offset + RECORD_ID_BYTES); - MapRecord childMap = segment.readMap(childMapId); - onMap(nodeId, childMapId, childMap); - for (ChildNodeEntry childNodeEntry : childMap.getEntries()) { - NodeState child = childNodeEntry.getNodeState(); - if (child instanceof SegmentNodeState) { - RecordId childId = ((SegmentNodeState) child).getRecordId(); - onNode(nodeId, childId); - nodeCount++; - } - } - } else if (template.getChildName() != ZERO_CHILD_NODES) { - RecordId childId = segment.readRecordId(offset + RECORD_ID_BYTES); - onNode(nodeId, childId); - nodeCount++; - } - - int ids = template.getChildName() == ZERO_CHILD_NODES ? 1 : 2; - size += ids * RECORD_ID_BYTES; - - // Recurse into properties - PropertyTemplate[] propertyTemplates = template.getPropertyTemplates(); - if (segment.getSegmentVersion().onOrAfter(V_11)) { - if (propertyTemplates.length > 0) { - size += RECORD_ID_BYTES; - RecordId id = segment.readRecordId(offset + ids * RECORD_ID_BYTES); - ListRecord pIds = new ListRecord(id, - propertyTemplates.length); - for (int i = 0; i < propertyTemplates.length; i++) { - RecordId propertyId = pIds.getEntry(i); - onProperty(nodeId, propertyId, propertyTemplates[i]); - propertyCount++; - } - onList(nodeId, id, propertyTemplates.length); - } - } else { - for (PropertyTemplate propertyTemplate : propertyTemplates) { - size += RECORD_ID_BYTES; - RecordId propertyId = segment.readRecordId(offset + ids++ * RECORD_ID_BYTES); - onProperty(nodeId, propertyId, propertyTemplate); - propertyCount++; - } - } - return new NodeInfo(nodeId, nodeCount, propertyCount, size); - } - - /** - * Parse a template record - * @param templateId - * @return - */ - @Deprecated - public TemplateInfo parseTemplate(RecordId templateId) { - int size = 0; - - Segment segment = templateId.getSegment(); - int offset = templateId.getOffset(); - int head = segment.readInt(offset + size); - boolean hasPrimaryType = (head & (1 << 31)) != 0; - boolean hasMixinTypes = (head & (1 << 30)) != 0; - boolean zeroChildNodes = (head & (1 << 29)) != 0; - boolean manyChildNodes = (head & (1 << 28)) != 0; - int mixinCount = (head >> 18) & ((1 << 10) - 1); - int propertyCount = head & ((1 << 18) - 1); - size += 4; - - if (hasPrimaryType) { - RecordId primaryId = segment.readRecordId(offset + size); - onString(templateId, primaryId); - size += RECORD_ID_BYTES; - } - - if (hasMixinTypes) { - for (int i = 0; i < mixinCount; i++) { - RecordId mixinId = segment.readRecordId(offset + size); - onString(templateId, mixinId); - size += RECORD_ID_BYTES; - } - } - - if (!zeroChildNodes && !manyChildNodes) { - 
RecordId childNameId = segment.readRecordId(offset + size); - onString(templateId, childNameId); - size += RECORD_ID_BYTES; - } - - if (segment.getSegmentVersion().onOrAfter(V_11)) { - if (propertyCount > 0) { - RecordId listId = segment.readRecordId(offset + size); - size += RECORD_ID_BYTES; - ListRecord propertyNames = new ListRecord(listId, propertyCount); - for (int i = 0; i < propertyCount; i++) { - RecordId propertyNameId = propertyNames.getEntry(i); - size++; // type - onString(templateId, propertyNameId); - } - onList(templateId, listId, propertyCount); - } - } else { - for (int i = 0; i < propertyCount; i++) { - RecordId propertyNameId = segment.readRecordId(offset + size); - size += RECORD_ID_BYTES; - size++; // type - onString(templateId, propertyNameId); - } - } - - return new TemplateInfo(templateId, hasPrimaryType, hasMixinTypes, - zeroChildNodes, manyChildNodes, mixinCount, propertyCount, size); - } - - /** - * Parse a map record - * @param parentId parent of this map or {@code null} if none - * @param mapId - * @param map - * @return - */ - @Deprecated - public MapInfo parseMap(RecordId parentId, RecordId mapId, MapRecord map) { - if (map.isDiff()) { - onMapDiff(parentId, mapId, map); - } else if (map.isLeaf()) { - onMapLeaf(parentId, mapId, map); - } else { - onMapBranch(parentId, mapId, map); - } - - return new MapInfo(mapId, -1); - } - - /** - * Parse a map diff record - * @param mapId - * @param map - * @return - */ - @Deprecated - public MapInfo parseMapDiff(RecordId mapId, MapRecord map) { - int size = 4; // -1 - size += 4; // hash of changed key - size += RECORD_ID_BYTES; // key - size += RECORD_ID_BYTES; // value - size += RECORD_ID_BYTES; // base - - RecordId baseId = mapId.getSegment() - .readRecordId(mapId.getOffset() + 8 + 2 * RECORD_ID_BYTES); - onMap(mapId, baseId, new MapRecord(baseId)); - - return new MapInfo(mapId, size); - } - - /** - * Parse a map leaf record - * @param mapId - * @param map - * @return - */ - @Deprecated - public MapInfo parseMapLeaf(RecordId mapId, MapRecord map) { - int size = 4; // size - size += map.size() * 4; // key hashes - - for (MapEntry entry : map.getEntries()) { - size += 2 * RECORD_ID_BYTES; // key value pairs - onString(mapId, entry.getKey()); - } - - return new MapInfo(mapId, size); - } - - /** - * Parse a map branch record - * @param mapId - * @param map - * @return - */ - @Deprecated - public MapInfo parseMapBranch(RecordId mapId, MapRecord map) { - int size = 4; // level/size - size += 4; // bitmap - for (MapRecord bucket : map.getBuckets()) { - if (bucket != null) { - size += RECORD_ID_BYTES; - onMap(map.getRecordId(), bucket.getRecordId(), bucket); - } - } - - return new MapInfo(mapId, size); - } - - /** - * Parse a property - * @param parentId - * @param propertyId - * @param template - * @return - */ - @Deprecated - public PropertyInfo parseProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - int size = 0; - int count = -1; // -1 -> single valued property - - Segment segment = propertyId.getSegment(); - int offset = propertyId.getOffset(); - Type type = template.getType(); - - if (type.isArray()) { - count = segment.readInt(offset); - size += 4; - - if (count > 0) { - RecordId listId = segment.readRecordId(offset + 4); - size += RECORD_ID_BYTES; - for (RecordId valueId : new ListRecord(listId, count).getEntries()) { - onValue(propertyId, valueId, type.getBaseType()); - } - onList(propertyId, listId, count); - } - } else { - onValue(parentId, propertyId, type); - } - - return new 
PropertyInfo(propertyId, count, size); - } - - /** - * Parse a value racrod - * @param parentId parent of the value record, {@code null} if none - * @param valueId - * @param type - * @return - */ - @Deprecated - public ValueInfo parseValue(RecordId parentId, RecordId valueId, Type type) { - checkArgument(!type.isArray()); - if (type == BINARY) { - onBlob(parentId, valueId); - } else { - onString(parentId, valueId); - } - return new ValueInfo(valueId, type); - } - - /** - * Parse a blob record - * @param blobId - * @return - */ - @Deprecated - public BlobInfo parseBlob(RecordId blobId) { - int size = 0; - BlobType blobType; - - Segment segment = blobId.getSegment(); - int offset = blobId.getOffset(); - byte head = segment.readByte(offset); - if ((head & 0x80) == 0x00) { - // 0xxx xxxx: small value - size += (1 + head); - blobType = BlobType.SMALL; - } else if ((head & 0xc0) == 0x80) { - // 10xx xxxx: medium value - int length = (segment.readShort(offset) & 0x3fff) + SMALL_LIMIT; - size += (2 + length); - blobType = BlobType.MEDIUM; - } else if ((head & 0xe0) == 0xc0) { - // 110x xxxx: long value - long length = (segment.readLong(offset) & 0x1fffffffffffffffL) + MEDIUM_LIMIT; - int count = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - RecordId listId = segment.readRecordId(offset + 8); - onList(blobId, listId, count); - size += (8 + RECORD_ID_BYTES + length); - blobType = BlobType.LONG; - } else if ((head & 0xf0) == 0xe0) { - // 1110 xxxx: external value - int length = (head & 0x0f) << 8 | (segment.readByte(offset + 1) & 0xff); - size += (2 + length); - blobType = BlobType.EXTERNAL; - } else { - throw new IllegalStateException(String.format( - "Unexpected value record type: %02x", head & 0xff)); - } - - return new BlobInfo(blobId, blobType, size); - } - - /** - * Parse a string record - * @param stringId - * @return - */ - @Deprecated - public BlobInfo parseString(RecordId stringId) { - int size = 0; - BlobType blobType; - - Segment segment = stringId.getSegment(); - int offset = stringId.getOffset(); - - long length = segment.readLength(offset); - if (length < Segment.SMALL_LIMIT) { - size += (1 + length); - blobType = BlobType.SMALL; - } else if (length < Segment.MEDIUM_LIMIT) { - size += (2 + length); - blobType = BlobType.MEDIUM; - } else if (length < Integer.MAX_VALUE) { - int count = (int) ((length + BLOCK_SIZE - 1) / BLOCK_SIZE); - RecordId listId = segment.readRecordId(offset + 8); - onList(stringId, listId, count); - size += (8 + RECORD_ID_BYTES + length); - blobType = BlobType.LONG; - } else { - throw new IllegalStateException("String is too long: " + length); - } - - return new BlobInfo(stringId, blobType, size); - } - - /** - * Parse a list record - * @param parentId parent of the list, {@code null} if none - * @param listId - * @param count - * @return - */ - @Deprecated - public ListInfo parseList(RecordId parentId, RecordId listId, int count) { - if (count != 0) { - onListBucket(parentId, listId, 0, count, count); - } - - return new ListInfo(listId, count, noOfListSlots(count) * RECORD_ID_BYTES); - } - - /** - * Parse item of list buckets - * @param listId - * @param index index of the first item to parse - * @param count number of items to parse - * @param capacity total number of items - * @return - */ - @Deprecated - public ListBucketInfo parseListBucket(RecordId listId, int index, int count, int capacity) { - Segment segment = listId.getSegment(); - - int bucketSize = 1; - while (bucketSize * LEVEL_SIZE < capacity) { - bucketSize *= LEVEL_SIZE; - } - - List entries; - 
if (capacity == 1) { - entries = singletonList(listId); - return new ListBucketInfo(listId, true, entries, RECORD_ID_BYTES); - } else if (bucketSize == 1) { - entries = newArrayListWithCapacity(count); - for (int i = 0; i < count; i++) { - entries.add(segment.readRecordId(getOffset(listId, index + i))); - } - return new ListBucketInfo(listId, true, entries, count * RECORD_ID_BYTES); - } else { - entries = newArrayList(); - while (count > 0) { - int bucketIndex = index / bucketSize; - int bucketOffset = index % bucketSize; - RecordId bucketId = segment.readRecordId(getOffset(listId, bucketIndex)); - entries.add(bucketId); - int c = Math.min(bucketSize, capacity - bucketIndex * bucketSize); - int n = Math.min(c - bucketOffset, count); - onListBucket(listId, bucketId, bucketOffset, n, c); - - index += n; - count -= n; - } - return new ListBucketInfo(listId, false, entries, entries.size() * RECORD_ID_BYTES); - } - } - - private static int getOffset(RecordId id, int ids) { - return id.getOffset() + ids * Segment.RECORD_ID_BYTES; - } - - private static int noOfListSlots(int size) { - if (size <= LEVEL_SIZE) { - return size; - } else { - int fullBuckets = size / LEVEL_SIZE; - if (size % LEVEL_SIZE > 1) { - return size + noOfListSlots(fullBuckets + 1); - } else { - return size + noOfListSlots(fullBuckets); - } - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentPropertyState.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentPropertyState.java deleted file mode 100644 index eb33e04..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentPropertyState.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
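The parseBlob() method removed above dispatches on the top bits of a value record's head byte. A self-contained sketch of just that dispatch, with invented class and enum names:

    // Standalone sketch (names invented) of parseBlob()'s head-byte dispatch:
    // 0xxxxxxx = small, 10xxxxxx = medium, 110xxxxx = long, 1110xxxx = external.
    final class BlobHead {

        enum Kind { SMALL, MEDIUM, LONG, EXTERNAL }

        static Kind kindOf(byte head) {
            if ((head & 0x80) == 0x00) {
                return Kind.SMALL;
            } else if ((head & 0xc0) == 0x80) {
                return Kind.MEDIUM;
            } else if ((head & 0xe0) == 0xc0) {
                return Kind.LONG;
            } else if ((head & 0xf0) == 0xe0) {
                return Kind.EXTERNAL;
            }
            throw new IllegalStateException(
                    String.format("Unexpected value record type: %02x", head & 0xff));
        }

        public static void main(String[] args) {
            System.out.println(kindOf((byte) 0x7f)); // SMALL    (top bit clear)
            System.out.println(kindOf((byte) 0xbf)); // MEDIUM   (top bits 10)
            System.out.println(kindOf((byte) 0xc0)); // LONG     (top bits 110)
            System.out.println(kindOf((byte) 0xe5)); // EXTERNAL (top bits 1110)
        }
    }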
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkElementIndex; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Maps.newHashMap; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.api.Type.BOOLEAN; -import static org.apache.jackrabbit.oak.api.Type.DATE; -import static org.apache.jackrabbit.oak.api.Type.DECIMAL; -import static org.apache.jackrabbit.oak.api.Type.DOUBLE; -import static org.apache.jackrabbit.oak.api.Type.LONG; -import static org.apache.jackrabbit.oak.api.Type.NAME; -import static org.apache.jackrabbit.oak.api.Type.PATH; -import static org.apache.jackrabbit.oak.api.Type.REFERENCE; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.api.Type.URI; -import static org.apache.jackrabbit.oak.api.Type.WEAKREFERENCE; - -import java.util.List; -import java.util.Map; - -import javax.annotation.Nonnull; -import javax.jcr.PropertyType; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.AbstractPropertyState; -import org.apache.jackrabbit.oak.plugins.value.Conversions; -import org.apache.jackrabbit.oak.plugins.value.Conversions.Converter; - -/** - * A property, which can read a value or list record from a segment. It - * currently doesn't cache data. - *

- * Depending on the property type, this is a record of type "VALUE" or a record - * of type "LIST" (for arrays). - */ -@Deprecated -public class SegmentPropertyState extends Record implements PropertyState { - - private final PropertyTemplate template; - - @Deprecated - public SegmentPropertyState(RecordId id, PropertyTemplate template) { - super(id); - this.template = checkNotNull(template); - } - - private ListRecord getValueList(Segment segment) { - RecordId listId = getRecordId(); - int size = 1; - if (isArray()) { - size = segment.readInt(getOffset()); - if (size > 0) { - listId = segment.readRecordId(getOffset(4)); - } - } - return new ListRecord(listId, size); - } - - Map getValueRecords() { - if (getType().tag() == PropertyType.BINARY) { - return emptyMap(); - } - - Map map = newHashMap(); - - Segment segment = getSegment(); - ListRecord values = getValueList(segment); - for (int i = 0; i < values.size(); i++) { - RecordId valueId = values.getEntry(i); - String value = Segment.readString(valueId); - map.put(value, valueId); - } - - return map; - } - - @Override @Nonnull - @Deprecated - public String getName() { - return template.getName(); - } - - @Override - @Deprecated - public Type getType() { - return template.getType(); - } - - @Override - @Deprecated - public boolean isArray() { - return getType().isArray(); - } - - @Override - @Deprecated - public int count() { - if (isArray()) { - return getSegment().readInt(getOffset()); - } else { - return 1; - } - } - - @Override @Nonnull @SuppressWarnings("unchecked") - @Deprecated - public T getValue(Type type) { - Segment segment = getSegment(); - if (isArray()) { - checkState(type.isArray()); - ListRecord values = getValueList(segment); - if (values.size() == 0) { - return (T) emptyList(); - } else if (values.size() == 1) { - return (T) singletonList(getValue( - segment, values.getEntry(0), type.getBaseType())); - } else { - Type base = type.getBaseType(); - List list = newArrayListWithCapacity(values.size()); - for (RecordId id : values.getEntries()) { - list.add(getValue(segment, id, base)); - } - return (T) list; - } - } else { - RecordId id = getRecordId(); - if (type.isArray()) { - return (T) singletonList( - getValue(segment, id, type.getBaseType())); - } else { - return getValue(segment, id, type); - } - } - } - - @Override - @Deprecated - public long size() { - return size(0); - } - - @Override @Nonnull - @Deprecated - public T getValue(Type type, int index) { - checkNotNull(type); - checkArgument(!type.isArray(), "Type must not be an array type"); - - Segment segment = getSegment(); - ListRecord values = getValueList(segment); - checkElementIndex(index, values.size()); - return getValue(segment, values.getEntry(index), type); - } - - @SuppressWarnings("unchecked") - private T getValue(Segment segment, RecordId id, Type type) { - if (type == BINARY) { - return (T) new SegmentBlob(id); // load binaries lazily - } - - String value = Segment.readString(id); - if (type == STRING || type == URI || type == DATE - || type == NAME || type == PATH - || type == REFERENCE || type == WEAKREFERENCE) { - return (T) value; // no conversion needed for string types - } - - Type base = getType(); - if (base.isArray()) { - base = base.getBaseType(); - } - Converter converter = Conversions.convert(value, base); - if (type == BOOLEAN) { - return (T) Boolean.valueOf(converter.toBoolean()); - } else if (type == DECIMAL) { - return (T) converter.toDecimal(); - } else if (type == DOUBLE) { - return (T) Double.valueOf(converter.toDouble()); - } else 
if (type == LONG) { - return (T) Long.valueOf(converter.toLong()); - } else { - throw new UnsupportedOperationException( - "Unknown type: " + type); - } - } - - @Override - @Deprecated - public long size(int index) { - ListRecord values = getValueList(getSegment()); - checkElementIndex(index, values.size()); - RecordId entry = values.getEntry(index); - - if (getType().equals(BINARY) || getType().equals(BINARIES)) { - return new SegmentBlob(entry).length(); - } - - return getSegment().readLength(entry); - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public boolean equals(Object object) { - // optimize for common cases - if (this == object) { // don't use fastEquals here due to value sharing - return true; - } else if (object instanceof SegmentPropertyState) { - SegmentPropertyState that = (SegmentPropertyState) object; - if (!template.equals(that.template)) { - return false; - } else if (getRecordId().equals(that.getRecordId())) { - return true; - } - } - // fall back to default equality check in AbstractPropertyState - return object instanceof PropertyState - && AbstractPropertyState.equal(this, (PropertyState) object); - } - - @Override - @Deprecated - public int hashCode() { - return AbstractPropertyState.hashCode(this); - } - - @Override - @Deprecated - public String toString() { - return AbstractPropertyState.toString(this); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStore.java deleted file mode 100644 index 570010c..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStore.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import java.io.Closeable; -import java.io.IOException; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -/** - * The backend storage interface used by the segment node store. - */ -@Deprecated -public interface SegmentStore extends Closeable { - - @Deprecated - SegmentTracker getTracker(); - - /** - * Returns the head state. - * - * @return head state - */ - @Nonnull - @Deprecated - SegmentNodeState getHead(); - - @Deprecated - boolean setHead(SegmentNodeState base, SegmentNodeState head); - - /** - * Checks whether the identified segment exists in this store. 
- * - * @param id segment identifier - * @return {@code true} if the segment exists, {@code false} otherwise - */ - @Deprecated - boolean containsSegment(SegmentId id); - - /** - * Reads the identified segment from this store. - * - * @param segmentId segment identifier - * @return identified segment, or a {@link SegmentNotFoundException} thrown if not found - */ - @CheckForNull - @Deprecated - Segment readSegment(SegmentId segmentId); - - /** - * Writes the given segment to the segment store. - * - * @param id segment identifier - * @param bytes byte buffer that contains the raw contents of the segment - * @param offset start offset within the byte buffer - * @param length length of the segment - */ - @Deprecated - void writeSegment(SegmentId id, byte[] bytes, int offset, int length) throws IOException; - - @Deprecated - void close(); - - /** - * Read a blob from external storage. - * - * @param reference blob reference - * @return external blob - */ - @Deprecated - Blob readBlob(String reference); - - /** - * Returns the external BlobStore (if configured) with this store - */ - @CheckForNull - @Deprecated - BlobStore getBlobStore(); - - /** - * Triggers removal of segments that are no longer referenceable. - */ - @Deprecated - void gc(); - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStoreProvider.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStoreProvider.java deleted file mode 100644 index 02d15d9..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStoreProvider.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -@Deprecated -public interface SegmentStoreProvider { - - @Deprecated - SegmentStore getSegmentStore(); -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStream.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStream.java deleted file mode 100644 index 3d29cd0..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentStream.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkPositionIndexes; -import static com.google.common.base.Preconditions.checkState; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentWriter.BLOCK_SIZE; - -import java.io.IOException; -import java.io.InputStream; -import java.util.List; - -import javax.annotation.CheckForNull; - -import com.google.common.base.Charsets; -import com.google.common.io.ByteStreams; - -/** - * For reading any record of type "VALUE" as binary streams. - */ -@Deprecated -public class SegmentStream extends InputStream { - - @CheckForNull - @Deprecated - public static RecordId getRecordIdIfAvailable( - InputStream stream, SegmentStore store) { - if (stream instanceof SegmentStream) { - SegmentStream sstream = (SegmentStream) stream; - RecordId id = sstream.recordId; - if (sstream.position == 0 - && store.containsSegment(id.getSegmentId())) { - return id; - } - } - return null; - } - - private final RecordId recordId; - - private final byte[] inline; - - private final ListRecord blocks; - - private final long length; - - private long position = 0; - - private long mark = 0; - - SegmentStream(RecordId recordId, ListRecord blocks, long length) { - this.recordId = checkNotNull(recordId); - this.inline = null; - this.blocks = checkNotNull(blocks); - checkArgument(length >= 0); - this.length = length; - } - - SegmentStream(RecordId recordId, byte[] inline) { - this.recordId = checkNotNull(recordId); - this.inline = checkNotNull(inline); - this.blocks = null; - this.length = inline.length; - } - - @Deprecated - public long getLength() { - return length; - } - - @Deprecated - public String getString() { - if (inline != null) { - return new String(inline, Charsets.UTF_8); - } else if (length > Integer.MAX_VALUE) { - throw new IllegalStateException("Too long value: " + length); - } else { - SegmentStream stream = new SegmentStream(recordId, blocks, length); - try { - byte[] data = new byte[(int) length]; - ByteStreams.readFully(stream, data); - return new String(data, Charsets.UTF_8); - } catch (IOException e) { - throw new IllegalStateException("Unexpected IOException", e); - } finally { - stream.close(); - } - } - } - - @Override - @Deprecated - public boolean markSupported() { - return true; - } - - @Override - @Deprecated - public synchronized void mark(int readlimit) { - mark = position; - } - - @Override - @Deprecated - public synchronized void reset() { - position = mark; - } - - @Override - @Deprecated - public int read() { - byte[] b = new byte[1]; - if (read(b, 0, 1) != -1) { - return b[0] & 0xff; - } else { - return -1; - } - } - - @Override - @Deprecated - public int read(byte[] b, int off, int len) { - checkNotNull(b); - checkPositionIndexes(off, off + len, b.length); - - if (len == 0) { - return 0; - } else if (position == length) { - return -1; - } - - if (position + len > length) { - len = (int) (length - position); // > 0 given the earlier 
check - } - - if (inline != null) { - System.arraycopy(inline, (int) position, b, off, len); - } else { - int blockIndex = (int) (position / BLOCK_SIZE); - int blockOffset = (int) (position % BLOCK_SIZE); - int blockCount = - (blockOffset + len + BLOCK_SIZE - 1) // round up - / BLOCK_SIZE; - - int remaining = len; - List ids = blocks.getEntries(blockIndex, blockCount); - RecordId first = ids.get(0); // guaranteed to contain at least one - int count = 1; - for (int i = 1; i <= ids.size(); i++) { - RecordId id = null; - if (i < ids.size()) { - id = ids.get(i); - } - - if (id != null - && id.getSegmentId().equals(first.getSegmentId()) - && id.getOffset() == first.getOffset() + count * BLOCK_SIZE) { - count++; - } else { - int blockSize = Math.min( - blockOffset + remaining, count * BLOCK_SIZE); - BlockRecord block = new BlockRecord(first, blockSize); - int n = blockSize - blockOffset; - checkState(block.read(blockOffset, b, off, n) == n); - off += n; - remaining -= n; - - first = id; - count = 1; - blockOffset = 0; - } - } - checkState(remaining == 0); - } - - position += len; - return len; - } - - @Override - @Deprecated - public long skip(long n) { - if (position + n > length) { - n = length - position; - } else if (position + n < 0) { - n = -position; - } - position += n; - return n; - } - - @Override - @Deprecated - public int available() { - if (inline != null) { - return (int) (length - position); // <= inline.length - } else { - return 0; - } - } - - @Override - @Deprecated - public void close() { - position = length; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java deleted file mode 100644 index b7128cc..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTracker.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
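SegmentStream.getString(), shown above, drains a bounded stream and decodes it as UTF-8 via Guava. A standalone sketch of the same pattern (ValueStrings is an invented name):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import com.google.common.base.Charsets;
    import com.google.common.io.ByteStreams;

    // Sketch of the getString() pattern: a value of known length is drained
    // fully and decoded as UTF-8; lengths beyond Integer.MAX_VALUE are
    // rejected, as in the deleted code.
    final class ValueStrings {

        static String readUtf8(InputStream stream, long length) throws IOException {
            if (length > Integer.MAX_VALUE) {
                throw new IllegalStateException("Too long value: " + length);
            }
            byte[] data = new byte[(int) length];
            ByteStreams.readFully(stream, data); // EOFException if stream is short
            return new String(data, Charsets.UTF_8);
        }

        public static void main(String[] args) throws IOException {
            byte[] bytes = "hello".getBytes(Charsets.UTF_8);
            System.out.println(readUtf8(new ByteArrayInputStream(bytes), bytes.length));
        }
    }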
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Queues.newArrayDeque; -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.Boolean.getBoolean; - -import java.io.IOException; -import java.security.SecureRandom; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.cache.CacheLIRS; -import org.apache.jackrabbit.oak.cache.CacheLIRS.EvictionCallback; -import org.apache.jackrabbit.oak.cache.CacheStats; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.cache.RemovalCause; - -/** - * Tracker of references to segment identifiers and segment instances - * that are currently kept in memory. - *

- * It is also responsible to cache segment objects in memory. - */ -@Deprecated -public class SegmentTracker { - - /** Logger instance */ - private static final Logger log = - LoggerFactory.getLogger(SegmentTracker.class); - - /** - * Disable the {@link #stringCache} if {@code true} and fall back to - * the previous {@link Segment#strings} caching mechanism. - */ - private static final boolean DISABLE_STRING_CACHE = getBoolean("oak.segment.disableStringCache"); - - static final String STRING_CACHE_SIZE = "oak.segment.stringCache"; - - private static final long MSB_MASK = ~(0xfL << 12); - - private static final long VERSION = (0x4L << 12); - - private static final long LSB_MASK = ~(0xfL << 60); - - private static final long DATA = 0xAL << 60; - - private static final long BULK = 0xBL << 60; - - private static final long MB = 1024 * 1024; - - private static final int DEFAULT_MEMORY_CACHE_SIZE = 256; - - /** - * The random number source for generating new segment identifiers. - */ - private final SecureRandom random = new SecureRandom(); - - private final SegmentStore store; - - private final SegmentWriter writer; - - /** - * Serialized map that contains the link between old record - * identifiers and identifiers of the corresponding records - * after compaction. - */ - private final AtomicReference compactionMap; - - /** - * Hash table of weak references to segment identifiers that are - * currently being accessed. The size of the table is always a power - * of two, which optimizes the {@code refresh()} operation. The table is - * indexed by the random identifier bits, which guarantees uniform - * distribution of entries. Each table entry is either {@code null} - * (when there are no matching identifiers) or a list of weak references - * to the matching identifiers. 
- */ - private final SegmentIdTable[] tables = new SegmentIdTable[32]; - - /** - * Cache for string records - */ - private final StringCache stringCache; - - /** - * Cache of recently accessed segments - */ - private final CacheLIRS segmentCache; - - /** - * Number of segments - */ - private final AtomicInteger segmentCounter = new AtomicInteger(); - - private final SegmentVersion segmentVersion; - - @Deprecated - public SegmentTracker(SegmentStore store, int cacheSizeMB, - SegmentVersion version) { - this.segmentVersion = version; - - for (int i = 0; i < tables.length; i++) { - tables[i] = new SegmentIdTable(this); - } - - this.store = store; - this.compactionMap = new AtomicReference( - CompactionMap.EMPTY); - this.writer = createSegmentWriter("sys"); - StringCache c; - if (DISABLE_STRING_CACHE) { - c = null; - } else { - long cache = Long.getLong(STRING_CACHE_SIZE, (long) cacheSizeMB); - c = new StringCache(cache * MB); - } - stringCache = c; - segmentCache = CacheLIRS.newBuilder() - .module("SegmentTracker") - .maximumWeight((long) cacheSizeMB * MB) - .averageWeight(Segment.MAX_SEGMENT_SIZE/2) - .evictionCallback(new EvictionCallback() { - @Override - public void evicted(SegmentId segmentId, Segment segment, RemovalCause cause) { - if (segment != null) { - segmentId.setSegment(null); - } - } - }) - .build(); - } - - @Deprecated - public SegmentTracker(SegmentStore store, SegmentVersion version) { - this(store, DEFAULT_MEMORY_CACHE_SIZE, version); - } - - @Deprecated - public SegmentTracker(SegmentStore store) { - this(store, DEFAULT_MEMORY_CACHE_SIZE, SegmentVersion.V_11); - } - - /** - * Increment and get the number of segments - * @return - */ - int getNextSegmentNo() { - return segmentCounter.incrementAndGet(); - } - - /** - * @return a new {@link SegmentWriter} instance for writing to this store. - */ - @Deprecated - public final SegmentWriter createSegmentWriter(String wid) { - return new SegmentWriter(store, segmentVersion, wid); - } - - @Nonnull - @Deprecated - public CacheStats getSegmentCacheStats() { - return new CacheStats(segmentCache, "Segment Cache", null, -1); - } - - @CheckForNull - @Deprecated - public CacheStats getStringCacheStats() { - return stringCache == null - ? null - : stringCache.getStats(); - } - - @Deprecated - public SegmentWriter getWriter() { - return writer; - } - - @Deprecated - public SegmentStore getStore() { - return store; - } - - /** - * Clear the caches - */ - @Deprecated - public synchronized void clearCache() { - segmentCache.invalidateAll(); - if (stringCache != null) { - stringCache.clear(); - } - } - - /** - * Get the string cache, if there is one. - * - * @return the string cache or {@code null} if none is configured - */ - StringCache getStringCache() { - return stringCache; - } - - /** - * Get a segment from the cache - * @param id segment id - * @return segment with the given {@code id} or {@code null} if not in the cache - */ - Segment getCachedSegment(SegmentId id) { - try { - return segmentCache.get(id); - } catch (ExecutionException e) { - log.error("Error reading from segment cache", e); - return null; - } - } - - /** - * Read a segment from the underlying segment store. - * @param id segment id - * @return segment with the given id - * @throws SegmentNotFoundException if no segment with the given {@code id} exists. 
- */ - Segment readSegment(SegmentId id) { - try { - Segment segment = store.readSegment(id); - setSegment(id, segment); - return segment; - } catch (SegmentNotFoundException snfe) { - long delta = System.currentTimeMillis() - id.getCreationTime(); - log.error("Segment not found: {}. Creation date delta is {} ms.", - id, delta, snfe); - throw snfe; - } - } - - void setSegment(SegmentId id, Segment segment) { - id.setSegment(segment); - segmentCache.put(id, segment, segment.size()); - } - - @Deprecated - public void setCompactionMap(PartialCompactionMap map) { - compactionMap.set(compactionMap.get().cons(map)); - } - - @Nonnull - @Deprecated - public CompactionMap getCompactionMap() { - return compactionMap.get(); - } - - /** - * Returns all segment identifiers that are currently referenced in memory. - * - * @return referenced segment identifiers - */ - @Deprecated - public synchronized Set getReferencedSegmentIds() { - Set ids = newHashSet(); - for (SegmentIdTable table : tables) { - table.collectReferencedIds(ids); - } - return ids; - } - - /** - * Finds all external blob references that are currently accessible - * in this repository and adds them to the given collector. Useful - * for collecting garbage in an external data store. - *

- * Note that this method only collects blob references that are already - * stored in the repository (at the time when this method is called), so - * the garbage collector will need some other mechanism for tracking - * in-memory references and references stored while this method is - * running. - */ - @Deprecated - public void collectBlobReferences(ReferenceCollector collector) { - try { - Set processed = newHashSet(); - for (SegmentId sid : getReferencedSegmentIds()) { - if (sid.isDataSegmentId()) { - processed.add(sid.asUUID()); - } - } - Queue queue = newArrayDeque(processed); - writer.flush(); // force the current segment to have root record info - while (!queue.isEmpty()) { - UUID uid = queue.remove(); - SegmentId id = getSegmentId(uid.getMostSignificantBits(), - uid.getLeastSignificantBits()); - Segment segment = null; - try { - segment = id.getSegment(); - } catch (SegmentNotFoundException ex) { - // gc'ed - } - if (segment == null) { - continue; - } - segment.collectBlobReferences(collector); - for (SegmentId refid : segment.getReferencedIds()) { - UUID rid = refid.asUUID(); - if (refid.isDataSegmentId() && !processed.contains(rid)) { - queue.add(rid); - processed.add(rid); - } - } - } - } catch (IOException e) { - log.error("Error while flushing pending segments", e); - throw new IllegalStateException("Unexpected IOException", e); - } - } - - /** - * - * @param msb - * @param lsb - * @return the segment id - */ - @Deprecated - public SegmentId getSegmentId(long msb, long lsb) { - int index = ((int) msb) & (tables.length - 1); - return tables[index].getSegmentId(msb, lsb); - } - - SegmentId newDataSegmentId() { - return newSegmentId(DATA); - } - - SegmentId newBulkSegmentId() { - return newSegmentId(BULK); - } - - private SegmentId newSegmentId(long type) { - long msb = (random.nextLong() & MSB_MASK) | VERSION; - long lsb = (random.nextLong() & LSB_MASK) | type; - return getSegmentId(msb, lsb); - } - - @Deprecated - public synchronized void clearSegmentIdTables(CompactionStrategy strategy) { - for (SegmentIdTable table : tables) { - table.clearSegmentIdTables(strategy); - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersion.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersion.java deleted file mode 100644 index 935c799..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersion.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
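The newSegmentId() logic above composes identifiers from masked random bits: a version nibble (0x4) in the most significant half and a type nibble (0xA data, 0xB bulk) at the top of the least significant half. A sketch using plain java.util.UUID; the isDataSegmentId check is my reading of those masks, not code from this diff:

    import java.security.SecureRandom;
    import java.util.UUID;

    // Sketch of the segment id layout from the deleted SegmentTracker.
    final class SegmentIds {

        private static final long MSB_MASK = ~(0xfL << 12);
        private static final long VERSION  = 0x4L << 12;
        private static final long LSB_MASK = ~(0xfL << 60);
        private static final long DATA     = 0xAL << 60;

        private static final SecureRandom RANDOM = new SecureRandom();

        static UUID newDataSegmentId() {
            long msb = (RANDOM.nextLong() & MSB_MASK) | VERSION;
            long lsb = (RANDOM.nextLong() & LSB_MASK) | DATA;
            return new UUID(msb, lsb);
        }

        static boolean isDataSegmentId(UUID id) {
            // the type nibble sits in the top 4 bits of the lsb
            return (id.getLeastSignificantBits() >>> 60) == 0xAL;
        }

        public static void main(String[] args) {
            UUID id = newDataSegmentId();
            System.out.println(id + " data=" + isDataSegmentId(id));
        }
    }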
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static java.util.Collections.max; -import static java.util.EnumSet.allOf; - -import java.util.Comparator; - -import com.google.common.primitives.UnsignedBytes; - -/** - * Version of the segment storage format. - *
- * <ul>
- * <li>10 = all Oak versions previous to 11</li>
- * <li>11 = all Oak versions starting from 1.0.12, 1.1.7 and 1.2</li>
- * </ul>
- */ -@Deprecated -public enum SegmentVersion { - - /** - * @deprecated Use latest version V11 - */ - @Deprecated - V_10((byte) 10), - - @Deprecated - V_11((byte) 11); - - /** - * Latest segment version - */ - @Deprecated - public static SegmentVersion LATEST_VERSION = max(allOf(SegmentVersion.class), - new Comparator() { - @Override - public int compare(SegmentVersion v1, SegmentVersion v2) { - return UnsignedBytes.compare(v1.version, v2.version); - } - }); - - private final byte version; - - SegmentVersion(byte version) { - this.version = version; - } - - @Deprecated - public boolean onOrAfter(SegmentVersion other) { - return compareTo(other) >= 0; - } - - @Deprecated - public static byte asByte(SegmentVersion v) { - return v.version; - } - - @Deprecated - public static SegmentVersion fromByte(byte v) { - if (v == V_11.version) { - return V_11; - } else if (v == V_10.version) { - return V_10; - } else { - throw new IllegalArgumentException("Unknown version " + v); - } - } - - @Deprecated - public static boolean isValid(byte v) { - return v == V_10.version || v == V_11.version; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java deleted file mode 100644 index 1564a11..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java +++ /dev/null @@ -1,911 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
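The SegmentVersion enum removed above ties each storage-format revision to a single byte and resolves LATEST_VERSION by unsigned byte comparison. A compact sketch of the same byte round-trip, under a hypothetical FormatVersion name:

    // Sketch of a version enum keyed by a byte code, mirroring the removed
    // SegmentVersion: decoding validates the byte, encoding is a field read.
    enum FormatVersion {
        V_10((byte) 10),
        V_11((byte) 11);

        private final byte code;

        FormatVersion(byte code) {
            this.code = code;
        }

        byte asByte() {
            return code;
        }

        static FormatVersion fromByte(byte b) {
            for (FormatVersion v : values()) {
                if (v.code == b) {
                    return v;
                }
            }
            throw new IllegalArgumentException("Unknown version " + b);
        }

        boolean onOrAfter(FormatVersion other) {
            // declaration order matches byte order here, so compareTo
            // suffices, as in the original onOrAfter
            return compareTo(other) >= 0;
        }
    }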
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Charsets.UTF_8; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkElementIndex; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkPositionIndex; -import static com.google.common.base.Preconditions.checkPositionIndexes; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Iterables.addAll; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Lists.newArrayListWithExpectedSize; -import static com.google.common.collect.Lists.partition; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static com.google.common.io.ByteStreams.read; -import static com.google.common.io.Closeables.close; -import static java.lang.String.valueOf; -import static java.lang.Thread.currentThread; -import static java.util.Arrays.asList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.nCopies; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.api.Type.NAME; -import static org.apache.jackrabbit.oak.api.Type.NAMES; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.plugins.segment.MapRecord.BUCKETS_PER_LEVEL; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newBlobIdWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newBlockWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newListBucketWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newListWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newMapBranchWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newMapLeafWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newNodeStateWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newTemplateWriter; -import static org.apache.jackrabbit.oak.plugins.segment.RecordWriters.newValueWriter; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.align; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.readString; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.SequenceInputStream; -import java.util.Arrays; -import java.util.Collection; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import javax.jcr.PropertyType; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.ModifiedNodeState; -import org.apache.jackrabbit.oak.plugins.segment.RecordWriters.RecordWriter; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import 
org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Converts nodes, properties, and values to records, which are written to segments. - */ -@Deprecated -public class SegmentWriter { - private static final Logger LOG = LoggerFactory.getLogger(SegmentWriter.class); - - static final int BLOCK_SIZE = 1 << 12; // 4kB - - private final SegmentBufferWriterPool segmentBufferWriterPool = new SegmentBufferWriterPool(); - - private static final int STRING_RECORDS_CACHE_SIZE = Integer.getInteger( - "oak.segment.writer.stringsCacheSize", 15000); - - /** - * Cache of recently stored string records, used to avoid storing duplicates - * of frequently occurring data. - */ - final Map stringCache = newItemsCache( - STRING_RECORDS_CACHE_SIZE); - - private static final int TPL_RECORDS_CACHE_SIZE = Integer.getInteger( - "oak.segment.writer.templatesCacheSize", 3000); - - /** - * Cache of recently stored template records, used to avoid storing - * duplicates of frequently occurring data. - */ - final Map templateCache = newItemsCache(TPL_RECORDS_CACHE_SIZE); - - private static final Map newItemsCache(final int size) { - final boolean disabled = size <= 0; - final int safeSize = size <= 0 ? 0 : (int) (size * 1.2); - return new LinkedHashMap(safeSize, 0.9f, true) { - @Override - protected boolean removeEldestEntry(Map.Entry e) { - return size() > size; - } - @Override - public synchronized RecordId get(Object key) { - if (disabled) { - return null; - } - return super.get(key); - } - @Override - public synchronized RecordId put(T key, RecordId value) { - if (disabled) { - return null; - } - return super.put(key, value); - } - @Override - public synchronized void clear() { - super.clear(); - } - }; - } - - private final SegmentStore store; - - /** - * Version of the segment storage format. 
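The string and template caches built by newItemsCache() above are plain LinkedHashMaps in access order that evict the eldest entry once a fixed size is exceeded. A minimal sketch of that idiom:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Minimal access-ordered LRU map, the idiom newItemsCache() builds on:
    // accessOrder=true moves an entry to the tail on get(), and
    // removeEldestEntry() evicts the head once the cap is exceeded.
    final class LruCache<K, V> extends LinkedHashMap<K, V> {

        private final int maxEntries;

        LruCache(int maxEntries) {
            super((int) (maxEntries * 1.2f), 0.9f, true); // true = access order
            this.maxEntries = maxEntries;
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > maxEntries;
        }
    }

The removed newItemsCache() wraps the same idea with synchronized get/put overrides and a switch that disables caching entirely for non-positive sizes.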
- */ - private final SegmentVersion version; - - private final String wid; - - /** - * @param store store to write to - * @param version segment version to write - * @param wid id of this writer - */ - @Deprecated - public SegmentWriter(SegmentStore store, SegmentVersion version, String wid) { - this.store = store; - this.version = version; - this.wid = wid; - } - - @Deprecated - public void flush() throws IOException { - segmentBufferWriterPool.flush(); - } - - @Deprecated - public void dropCache() { - stringCache.clear(); - templateCache.clear(); - } - - MapRecord writeMap(MapRecord base, Map changes) throws IOException { - if (base != null && base.isDiff()) { - Segment segment = base.getSegment(); - RecordId key = segment.readRecordId(base.getOffset(8)); - String name = readString(key); - if (!changes.containsKey(name)) { - changes.put(name, segment.readRecordId(base.getOffset(8, 1))); - } - base = new MapRecord(segment.readRecordId(base.getOffset(8, 2))); - } - - if (base != null && changes.size() == 1) { - Map.Entry change = - changes.entrySet().iterator().next(); - RecordId value = change.getValue(); - if (value != null) { - MapEntry entry = base.getEntry(change.getKey()); - if (entry != null) { - if (value.equals(entry.getValue())) { - return base; - } else { - return writeRecord(newMapBranchWriter(entry.getHash(), - asList(entry.getKey(), value, base.getRecordId()))); - } - } - } - } - - List entries = newArrayList(); - for (Map.Entry entry : changes.entrySet()) { - String key = entry.getKey(); - - RecordId keyId = null; - if (base != null) { - MapEntry e = base.getEntry(key); - if (e != null) { - keyId = e.getKey(); - } - } - if (keyId == null && entry.getValue() != null) { - keyId = writeString(key); - } - - if (keyId != null) { - entries.add(new MapEntry(key, keyId, entry.getValue())); - } - } - return writeMapBucket(base, entries, 0); - } - - private MapRecord writeMapLeaf(int level, Collection entries) throws IOException { - checkNotNull(entries); - int size = entries.size(); - checkElementIndex(size, MapRecord.MAX_SIZE); - checkPositionIndex(level, MapRecord.MAX_NUMBER_OF_LEVELS); - checkArgument(size != 0 || level == MapRecord.MAX_NUMBER_OF_LEVELS); - return writeRecord(newMapLeafWriter(level, entries)); - } - - private MapRecord writeMapBranch(int level, int size, MapRecord[] buckets) throws IOException { - int bitmap = 0; - List bucketIds = newArrayListWithCapacity(buckets.length); - for (int i = 0; i < buckets.length; i++) { - if (buckets[i] != null) { - bitmap |= 1L << i; - bucketIds.add(buckets[i].getRecordId()); - } - } - return writeRecord(newMapBranchWriter(level, size, bitmap, bucketIds)); - } - - private MapRecord writeMapBucket(MapRecord base, Collection entries, int level) throws IOException { - // when no changed entries, return the base map (if any) as-is - if (entries == null || entries.isEmpty()) { - if (base != null) { - return base; - } else if (level == 0) { - return writeRecord(newMapLeafWriter()); - } else { - return null; - } - } - - // when no base map was given, write a fresh new map - if (base == null) { - // use leaf records for small maps or the last map level - if (entries.size() <= BUCKETS_PER_LEVEL - || level == MapRecord.MAX_NUMBER_OF_LEVELS) { - return writeMapLeaf(level, entries); - } - - // write a large map by dividing the entries into buckets - MapRecord[] buckets = new MapRecord[BUCKETS_PER_LEVEL]; - List> changes = splitToBuckets(entries, level); - for (int i = 0; i < BUCKETS_PER_LEVEL; i++) { - buckets[i] = writeMapBucket(null, 
changes.get(i), level + 1); - } - - // combine the buckets into one big map - return writeMapBranch(level, entries.size(), buckets); - } - - // if the base map is small, update in memory and write as a new map - if (base.isLeaf()) { - Map map = newHashMap(); - for (MapEntry entry : base.getEntries()) { - map.put(entry.getName(), entry); - } - for (MapEntry entry : entries) { - if (entry.getValue() != null) { - map.put(entry.getName(), entry); - } else { - map.remove(entry.getName()); - } - } - return writeMapBucket(null, map.values(), level); - } - - // finally, the if the base map is large, handle updates per bucket - int newSize = 0; - int newCount = 0; - MapRecord[] buckets = base.getBuckets(); - List> changes = splitToBuckets(entries, level); - for (int i = 0; i < BUCKETS_PER_LEVEL; i++) { - buckets[i] = writeMapBucket(buckets[i], changes.get(i), level + 1); - if (buckets[i] != null) { - newSize += buckets[i].size(); - newCount++; - } - } - - // OAK-654: what if the updated map is smaller? - if (newSize > BUCKETS_PER_LEVEL) { - return writeMapBranch(level, newSize, buckets); - } else if (newCount <= 1) { - // up to one bucket contains entries, so return that as the new map - for (MapRecord bucket : buckets) { - if (bucket != null) { - return bucket; - } - } - // no buckets remaining, return empty map - return writeMapBucket(null, null, level); - } else { - // combine all remaining entries into a leaf record - List list = newArrayList(); - for (MapRecord bucket : buckets) { - if (bucket != null) { - addAll(list, bucket.getEntries()); - } - } - return writeMapLeaf(level, list); - } - } - - /** - * Writes a list record containing the given list of record identifiers. - * - * @param list list of record identifiers - * @return list record identifier - */ - @Deprecated - public RecordId writeList(List list) throws IOException { - checkNotNull(list); - checkArgument(!list.isEmpty()); - List thisLevel = list; - while (thisLevel.size() > 1) { - List nextLevel = newArrayList(); - for (List bucket : - partition(thisLevel, ListRecord.LEVEL_SIZE)) { - if (bucket.size() > 1) { - nextLevel.add(writeListBucket(bucket)); - } else { - nextLevel.add(bucket.get(0)); - } - } - thisLevel = nextLevel; - } - return thisLevel.iterator().next(); - } - - private RecordId writeListBucket(List bucket) throws IOException { - checkArgument(bucket.size() > 1); - return writeRecord(newListBucketWriter(bucket)); - } - - private static List> splitToBuckets(Collection entries, int level) { - List empty = null; - int mask = (1 << MapRecord.BITS_PER_LEVEL) - 1; - int shift = 32 - (level + 1) * MapRecord.BITS_PER_LEVEL; - - List> buckets = - newArrayList(nCopies(MapRecord.BUCKETS_PER_LEVEL, empty)); - for (MapEntry entry : entries) { - int index = (entry.getHash() >> shift) & mask; - List bucket = buckets.get(index); - if (bucket == null) { - bucket = newArrayList(); - buckets.set(index, bucket); - } - bucket.add(entry); - } - return buckets; - } - - private RecordId writeValueRecord(long length, RecordId blocks) throws IOException { - long len = (length - Segment.MEDIUM_LIMIT) | (0x3L << 62); - return writeRecord(newValueWriter(blocks, len)); - } - - private RecordId writeValueRecord(int length, byte[] data) throws IOException { - checkArgument(length < Segment.MEDIUM_LIMIT); - return writeRecord(newValueWriter(length, data)); - } - - /** - * Writes a string value record. 
- * - * @param string string to be written - * @return value record identifier - */ - @Deprecated - public RecordId writeString(String string) throws IOException { - RecordId id = stringCache.get(string); - if (id != null) { - return id; // shortcut if the same string was recently stored - } - - byte[] data = string.getBytes(UTF_8); - - if (data.length < Segment.MEDIUM_LIMIT) { - // only cache short strings to avoid excessive memory use - id = writeValueRecord(data.length, data); - stringCache.put(string, id); - return id; - } - - int pos = 0; - List blockIds = newArrayListWithExpectedSize( - data.length / BLOCK_SIZE + 1); - - // write as many full bulk segments as possible - while (pos + MAX_SEGMENT_SIZE <= data.length) { - SegmentId bulkId = store.getTracker().newBulkSegmentId(); - store.writeSegment(bulkId, data, pos, MAX_SEGMENT_SIZE); - for (int i = 0; i < MAX_SEGMENT_SIZE; i += BLOCK_SIZE) { - blockIds.add(new RecordId(bulkId, i)); - } - pos += MAX_SEGMENT_SIZE; - } - - // inline the remaining data as block records - while (pos < data.length) { - int len = Math.min(BLOCK_SIZE, data.length - pos); - blockIds.add(writeBlock(data, pos, len)); - pos += len; - } - - return writeValueRecord(data.length, writeList(blockIds)); - } - - @Deprecated - public SegmentBlob writeBlob(Blob blob) throws IOException { - if (blob instanceof SegmentBlob - && store.containsSegment(((SegmentBlob) blob).getRecordId().getSegmentId())) { - return (SegmentBlob) blob; - } - - String reference = blob.getReference(); - if (reference != null && store.getBlobStore() != null) { - String blobId = store.getBlobStore().getBlobId(reference); - if (blobId != null) { - RecordId id = writeBlobId(blobId); - return new SegmentBlob(id); - } else { - LOG.debug("No blob found for reference {}, inlining...", reference); - } - } - - return writeStream(blob.getNewStream()); - } - - /** - * Write a reference to an external blob. This method handles blob IDs of - * every length, but behaves differently for small and large blob IDs. - * - * @param blobId Blob ID. - * @return Record ID pointing to the written blob ID. - * @see Segment#BLOB_ID_SMALL_LIMIT - */ - private RecordId writeBlobId(String blobId) throws IOException { - byte[] data = blobId.getBytes(UTF_8); - if (data.length < Segment.BLOB_ID_SMALL_LIMIT) { - return writeRecord(newBlobIdWriter(data)); - } else { - return writeRecord(newBlobIdWriter(writeString(blobId))); - } - } - - /** - * Writes a block record containing the given block of bytes. - * - * @param bytes source buffer - * @param offset offset within the source buffer - * @param length number of bytes to write - * @return block record identifier - */ - RecordId writeBlock(byte[] bytes, int offset, int length) throws IOException { - checkNotNull(bytes); - checkPositionIndexes(offset, offset + length, bytes.length); - return writeRecord(newBlockWriter(bytes, offset, length)); - } - - SegmentBlob writeExternalBlob(String blobId) throws IOException { - RecordId id = writeBlobId(blobId); - return new SegmentBlob(id); - } - - SegmentBlob writeLargeBlob(long length, List list) throws IOException { - RecordId id = writeValueRecord(length, writeList(list)); - return new SegmentBlob(id); - } - - /** - * Writes a stream value record. The given stream is consumed - * and closed by this method. 
- * - * @param stream stream to be written - * @return value record identifier - * @throws IOException if the stream could not be read - */ - @Deprecated - public SegmentBlob writeStream(InputStream stream) throws IOException { - boolean threw = true; - try { - RecordId id = SegmentStream.getRecordIdIfAvailable(stream, store); - if (id == null) { - id = internalWriteStream(stream); - } - threw = false; - return new SegmentBlob(id); - } finally { - close(stream, threw); - } - } - - private RecordId internalWriteStream(InputStream stream) - throws IOException { - BlobStore blobStore = store.getBlobStore(); - byte[] data = new byte[Segment.MEDIUM_LIMIT]; - int n = read(stream, data, 0, data.length); - - // Special case for short binaries (up to about 16kB): - // store them directly as small- or medium-sized value records - if (n < Segment.MEDIUM_LIMIT) { - return writeValueRecord(n, data); - } else if (blobStore != null) { - String blobId = blobStore.writeBlob(new SequenceInputStream( - new ByteArrayInputStream(data, 0, n), stream)); - return writeBlobId(blobId); - } - - data = Arrays.copyOf(data, MAX_SEGMENT_SIZE); - n += read(stream, data, n, MAX_SEGMENT_SIZE - n); - long length = n; - List blockIds = - newArrayListWithExpectedSize(2 * n / BLOCK_SIZE); - - // Write the data to bulk segments and collect the list of block ids - while (n != 0) { - SegmentId bulkId = store.getTracker().newBulkSegmentId(); - int len = align(n, 1 << Segment.RECORD_ALIGN_BITS); - LOG.debug("Writing bulk segment {} ({} bytes)", bulkId, n); - store.writeSegment(bulkId, data, 0, len); - - for (int i = 0; i < n; i += BLOCK_SIZE) { - blockIds.add(new RecordId(bulkId, data.length - len + i)); - } - - n = read(stream, data, 0, data.length); - length += n; - } - - return writeValueRecord(length, writeList(blockIds)); - } - - @Deprecated - public RecordId writeProperty(PropertyState state) throws IOException { - Map previousValues = emptyMap(); - return writeProperty(state, previousValues); - } - - private RecordId writeProperty(PropertyState state, Map previousValues) throws IOException { - Type type = state.getType(); - int count = state.count(); - - List valueIds = newArrayList(); - for (int i = 0; i < count; i++) { - if (type.tag() == PropertyType.BINARY) { - try { - SegmentBlob blob = - writeBlob(state.getValue(BINARY, i)); - valueIds.add(blob.getRecordId()); - } catch (IOException e) { - throw new IllegalStateException("Unexpected IOException", e); - } - } else { - String value = state.getValue(STRING, i); - RecordId valueId = previousValues.get(value); - if (valueId == null) { - valueId = writeString(value); - } - valueIds.add(valueId); - } - } - - if (!type.isArray()) { - return valueIds.iterator().next(); - } else if (count == 0) { - return writeRecord(newListWriter()); - } else { - return writeRecord(newListWriter(count, writeList(valueIds))); - } - } - - @Deprecated - public RecordId writeTemplate(Template template) throws IOException { - checkNotNull(template); - - RecordId id = templateCache.get(template); - if (id != null) { - return id; // shortcut if the same template was recently stored - } - - Collection ids = newArrayList(); - int head = 0; - - RecordId primaryId = null; - PropertyState primaryType = template.getPrimaryType(); - if (primaryType != null) { - head |= 1 << 31; - primaryId = writeString(primaryType.getValue(NAME)); - ids.add(primaryId); - } - - List mixinIds = null; - PropertyState mixinTypes = template.getMixinTypes(); - if (mixinTypes != null) { - head |= 1 << 30; - mixinIds = 
newArrayList(); - for (String mixin : mixinTypes.getValue(NAMES)) { - mixinIds.add(writeString(mixin)); - } - ids.addAll(mixinIds); - checkState(mixinIds.size() < (1 << 10)); - head |= mixinIds.size() << 18; - } - - RecordId childNameId = null; - String childName = template.getChildName(); - if (childName == Template.ZERO_CHILD_NODES) { - head |= 1 << 29; - } else if (childName == Template.MANY_CHILD_NODES) { - head |= 1 << 28; - } else { - childNameId = writeString(childName); - ids.add(childNameId); - } - - PropertyTemplate[] properties = template.getPropertyTemplates(); - RecordId[] propertyNames = new RecordId[properties.length]; - byte[] propertyTypes = new byte[properties.length]; - for (int i = 0; i < properties.length; i++) { - // Note: if the property names are stored in more than 255 separate - // segments, this will not work. - propertyNames[i] = writeString(properties[i].getName()); - Type type = properties[i].getType(); - if (type.isArray()) { - propertyTypes[i] = (byte) -type.tag(); - } else { - propertyTypes[i] = (byte) type.tag(); - } - } - - RecordId propNamesId = null; - if (version.onOrAfter(V_11)) { - if (propertyNames.length > 0) { - propNamesId = writeList(asList(propertyNames)); - ids.add(propNamesId); - } - } else { - ids.addAll(asList(propertyNames)); - } - - checkState(propertyNames.length < (1 << 18)); - head |= propertyNames.length; - - RecordId tid = writeRecord(newTemplateWriter(ids, propertyNames, - propertyTypes, head, primaryId, mixinIds, childNameId, - propNamesId, version)); - templateCache.put(template, tid); - return tid; - } - - @Deprecated - public SegmentNodeState writeNode(NodeState state) throws IOException { - if (state instanceof SegmentNodeState) { - SegmentNodeState sns = uncompact((SegmentNodeState) state); - if (sns != state || store.containsSegment( - sns.getRecordId().getSegmentId())) { - return sns; - } - } - - SegmentNodeState before = null; - Template beforeTemplate = null; - ModifiedNodeState after = null; - if (state instanceof ModifiedNodeState) { - after = (ModifiedNodeState) state; - NodeState base = after.getBaseState(); - if (base instanceof SegmentNodeState) { - SegmentNodeState sns = uncompact((SegmentNodeState) base); - if (sns != base || store.containsSegment( - sns.getRecordId().getSegmentId())) { - before = sns; - beforeTemplate = before.getTemplate(); - } - } - } - - Template template = new Template(state); - RecordId templateId; - if (before != null && template.equals(beforeTemplate)) { - templateId = before.getTemplateId(); - } else { - templateId = writeTemplate(template); - } - - List ids = newArrayList(); - ids.add(templateId); - - String childName = template.getChildName(); - if (childName == Template.MANY_CHILD_NODES) { - MapRecord base; - Map childNodes; - if (before != null - && before.getChildNodeCount(2) > 1 - && after.getChildNodeCount(2) > 1) { - base = before.getChildNodeMap(); - childNodes = new ChildNodeCollectorDiff().diff(before, after); - } else { - base = null; - childNodes = newHashMap(); - for (ChildNodeEntry entry : state.getChildNodeEntries()) { - childNodes.put( - entry.getName(), - writeNode(entry.getNodeState()).getRecordId()); - } - } - ids.add(writeMap(base, childNodes).getRecordId()); - } else if (childName != Template.ZERO_CHILD_NODES) { - ids.add(writeNode(state.getChildNode(template.getChildName())).getRecordId()); - } - - List pIds = newArrayList(); - for (PropertyTemplate pt : template.getPropertyTemplates()) { - String name = pt.getName(); - PropertyState property = 
state.getProperty(name); - - if (property instanceof SegmentPropertyState - && store.containsSegment(((SegmentPropertyState) property).getRecordId().getSegmentId())) { - pIds.add(((SegmentPropertyState) property).getRecordId()); - } else if (before == null - || !store.containsSegment(before.getRecordId().getSegmentId())) { - pIds.add(writeProperty(property)); - } else { - // reuse previously stored property, if possible - PropertyTemplate bt = beforeTemplate.getPropertyTemplate(name); - if (bt == null) { - pIds.add(writeProperty(property)); // new property - } else { - SegmentPropertyState bp = beforeTemplate.getProperty( - before.getRecordId(), bt.getIndex()); - if (property.equals(bp)) { - pIds.add(bp.getRecordId()); // no changes - } else if (bp.isArray() && bp.getType() != BINARIES) { - // reuse entries from the previous list - pIds.add(writeProperty(property, bp.getValueRecords())); - } else { - pIds.add(writeProperty(property)); - } - } - } - } - - if (!pIds.isEmpty()) { - if (version.onOrAfter(V_11)) { - ids.add(writeList(pIds)); - } else { - ids.addAll(pIds); - } - } - return writeRecord(newNodeStateWriter(ids)); - } - - /** - * If the given node was compacted, return the compacted node, otherwise - * return the passed node. This is to avoid pointing to old nodes, if they - * have been compacted. - * - * @param state the node - * @return the compacted node (if it was compacted) - */ - private SegmentNodeState uncompact(SegmentNodeState state) { - RecordId id = store.getTracker().getCompactionMap().get(state.getRecordId()); - if (id != null) { - return new SegmentNodeState(id); - } else { - return state; - } - } - - private T writeRecord(RecordWriter recordWriter) throws IOException { - SegmentBufferWriter writer = segmentBufferWriterPool.borrowWriter(currentThread()); - try { - return recordWriter.write(writer); - } finally { - segmentBufferWriterPool.returnWriter(currentThread(), writer); - } - } - - private class SegmentBufferWriterPool { - private final Set borrowed = newHashSet(); - private final Map writers = newHashMap(); - - private short writerId = -1; - - public void flush() throws IOException { - List toFlush = newArrayList(); - synchronized (this) { - toFlush.addAll(writers.values()); - writers.clear(); - borrowed.clear(); - } - // Call flush from outside a synchronized context to avoid - // deadlocks of that method calling SegmentStore.writeSegment - for (SegmentBufferWriter writer : toFlush) { - writer.flush(); - } - } - - public synchronized SegmentBufferWriter borrowWriter(Object key) throws IOException { - SegmentBufferWriter writer = writers.remove(key); - if (writer == null) { - writer = new SegmentBufferWriter(store, version, wid + "." + getWriterId()); - } - borrowed.add(writer); - return writer; - } - - public void returnWriter(Object key, SegmentBufferWriter writer) throws IOException { - if (!tryReturn(key, writer)) { - // Delayed flush this writer as it was borrowed while flush() was called. 
- writer.flush(); - } - } - - private synchronized boolean tryReturn(Object key, SegmentBufferWriter writer) { - if (borrowed.remove(writer)) { - writers.put(key, writer); - return true; - } else { - return false; - } - } - - private synchronized String getWriterId() { - if (++writerId > 9999) { - writerId = 0; - } - // Manually padding seems to be fastest here - if (writerId < 10) { - return "000" + writerId; - } else if (writerId < 100) { - return "00" + writerId; - } else if (writerId < 1000) { - return "0" + writerId; - } else { - return valueOf(writerId); - } - } - } - - private class ChildNodeCollectorDiff extends DefaultNodeStateDiff { - private final Map childNodes = newHashMap(); - private IOException exception; - - @Override - public boolean childNodeAdded(String name, NodeState after) { - try { - childNodes.put(name, writeNode(after).getRecordId()); - } catch (IOException e) { - exception = e; - return false; - } - return true; - } - - @Override - public boolean childNodeChanged( - String name, NodeState before, NodeState after) { - try { - childNodes.put(name, writeNode(after).getRecordId()); - } catch (IOException e) { - exception = e; - return false; - } - return true; - } - - @Override - public boolean childNodeDeleted(String name, NodeState before) { - childNodes.put(name, null); - return true; - } - - public Map diff(SegmentNodeState before, ModifiedNodeState after) throws IOException { - after.compareAgainstBaseState(before, this); - if (exception != null) { - throw new IOException(exception); - } else { - return childNodes; - } - } - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/StringCache.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/StringCache.java deleted file mode 100644 index dd75859..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/StringCache.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import java.util.Arrays; - -import javax.annotation.Nonnull; - -import com.google.common.base.Function; -import org.apache.jackrabbit.oak.cache.CacheLIRS; -import org.apache.jackrabbit.oak.cache.CacheStats; -import static org.apache.jackrabbit.oak.commons.StringUtils.estimateMemoryUsage; - -/** - * A string cache. It has two components: a fast cache for small strings, based - * on an array, and a slow cache that uses a LIRS cache. - */ -@Deprecated -public class StringCache { - - /** - * The fast (array based) cache. - */ - private final FastCache fastCache; - - /** - * The slower (LIRS) cache. - */ - private final CacheLIRS cache; - - /** - * Create a new string cache. - * - * @param maxSize the maximum memory in bytes. 
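The SegmentBufferWriterPool deleted here keys writers by thread, lets flush() steal all idle writers under the lock but flush them outside of it, and makes a returning thread flush its own writer when the pool was flushed in between. A self-contained sketch of that handshake, with BufferWriter as a hypothetical stand-in for SegmentBufferWriter:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // Hypothetical stand-in for SegmentBufferWriter; flush() just drops
    // the buffer instead of writing a segment.
    final class BufferWriter {
        private final StringBuilder buffer = new StringBuilder();

        void append(String s) { buffer.append(s); }

        void flush() { buffer.setLength(0); }
    }

    final class WriterPool {

        private final Set<BufferWriter> borrowed = new HashSet<>();
        private final Map<Object, BufferWriter> idle = new HashMap<>();

        synchronized BufferWriter borrow(Object key) {
            BufferWriter writer = idle.remove(key);
            if (writer == null) {
                writer = new BufferWriter();
            }
            borrowed.add(writer);
            return writer;
        }

        void giveBack(Object key, BufferWriter writer) {
            if (!tryReturn(key, writer)) {
                // The pool was flushed while this writer was borrowed, so
                // the returning thread flushes it itself, outside any lock.
                writer.flush();
            }
        }

        private synchronized boolean tryReturn(Object key, BufferWriter writer) {
            if (borrowed.remove(writer)) {
                idle.put(key, writer);
                return true;
            }
            return false;
        }

        void flush() {
            List<BufferWriter> toFlush;
            synchronized (this) {
                toFlush = new ArrayList<>(idle.values());
                idle.clear();
                borrowed.clear();
            }
            // Flushing happens outside the synchronized block, mirroring
            // the deadlock-avoidance note in the original flush().
            for (BufferWriter writer : toFlush) {
                writer.flush();
            }
        }
    }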
- */ - StringCache(long maxSize) { - if (maxSize >= 0) { - fastCache = new FastCache(); - cache = CacheLIRS.newBuilder() - .module("StringCache") - .maximumWeight(maxSize) - .averageWeight(250) - .build(); - } else { - fastCache = null; - // dummy cache to prevent NPE on the getStats() call - cache = CacheLIRS. newBuilder() - .module("StringCache") - .maximumSize(1) - .build(); - } - } - - @Nonnull - @Deprecated - public CacheStats getStats() { - return new CacheStats(cache, "String Cache", null, -1); - } - - /** - * Get the string, loading it if necessary. - * - * @param msb the msb of the segment - * @param lsb the lsb of the segment - * @param offset the offset - * @param loader the string loader function - * @return the string (never null) - */ - @Deprecated - public String getString(long msb, long lsb, int offset, Function loader) { - int hash = getEntryHash(msb, lsb, offset); - if (fastCache == null) { - // disabled cache - return loader.apply(offset); - } - - String s = fastCache.getString(hash, msb, lsb, offset); - if (s != null) { - return s; - } - StringCacheKey key = new StringCacheKey(hash, msb, lsb, offset); - s = cache.getIfPresent(key); - if (s == null) { - s = loader.apply(offset); - cache.put(key, s, getMemory(s)); - } - if (FastCache.isSmall(s)) { - fastCache.addString(hash, new FastCacheEntry(hash, msb, lsb, offset, s)); - } - return s; - } - - /** - * Clear the cache. - */ - @Deprecated - public void clear() { - if (fastCache != null) { - cache.invalidateAll(); - fastCache.clear(); - } - } - - /** - * Estimation includes the key's overhead, see {@link EmpiricalWeigher} for - * an example - */ - private static int getMemory(String s) { - int size = 168; // overhead for each cache entry - size += 40; // key - size += estimateMemoryUsage(s); // value - return size; - } - - private static int getEntryHash(long lsb, long msb, int offset) { - int hash = (int) (msb ^ lsb) + offset; - hash = ((hash >>> 16) ^ hash) * 0x45d9f3b; - return (hash >>> 16) ^ hash; - } - - /** - * A fast cache based on an array. - */ - static class FastCache { - - /** - * The maximum number of characters in string that are cached. - */ - static final int MAX_STRING_SIZE = 128; - - /** - * The number of entries in the cache. Must be a power of 2. - */ - private static final int CACHE_SIZE = 16 * 1024; - - /** - * The cache array. - */ - private final FastCacheEntry[] cache = new FastCacheEntry[CACHE_SIZE]; - - /** - * Get the string if it is stored. - * - * @param hash the hash - * @param msb - * @param lsb - * @param offset the offset - * @return the string, or null - */ - String getString(int hash, long msb, long lsb, int offset) { - int index = hash & (CACHE_SIZE - 1); - FastCacheEntry e = cache[index]; - if (e != null && e.matches(msb, lsb, offset)) { - return e.string; - } - return null; - } - - void clear() { - Arrays.fill(cache, null); - } - - /** - * Whether the entry is small, in which case it can be kept in the fast cache. 
- * - * @param s the string - * @return whether the entry is small - */ - static boolean isSmall(String s) { - return s.length() <= MAX_STRING_SIZE; - } - - void addString(int hash, FastCacheEntry entry) { - int index = hash & (CACHE_SIZE - 1); - cache[index] = entry; - } - - } - - private static class StringCacheKey { - private final int hash; - private final long msb, lsb; - private final int offset; - - StringCacheKey(int hash, long msb, long lsb, int offset) { - this.hash = hash; - this.msb = msb; - this.lsb = lsb; - this.offset = offset; - } - - @Override - public int hashCode() { - return hash; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } - if (!(other instanceof StringCacheKey)) { - return false; - } - StringCacheKey o = (StringCacheKey) other; - return o.hash == hash && o.msb == msb && o.lsb == lsb && - o.offset == offset; - } - - @Override - public String toString() { - StringBuilder buff = new StringBuilder(); - buff.append(Long.toHexString(msb)). - append(':').append(Long.toHexString(lsb)). - append('+').append(Integer.toHexString(offset)); - return buff.toString(); - } - - } - - private static class FastCacheEntry { - - private final int hash; - private final long msb, lsb; - private final int offset; - private final String string; - - FastCacheEntry(int hash, long msb, long lsb, int offset, String string) { - this.hash = hash; - this.msb = msb; - this.lsb = lsb; - this.offset = offset; - this.string = string; - } - - boolean matches(long msb, long lsb, int offset) { - return this.offset == offset && this.msb == msb && this.lsb == lsb; - } - - @Override - public int hashCode() { - return hash; - } - - @Override - public boolean equals(Object other) { - if (other == this) { - return true; - } - if (!(other instanceof FastCacheEntry)) { - return false; - } - FastCacheEntry o = (FastCacheEntry) other; - return o.hash == hash && o.msb == msb && o.lsb == lsb && - o.offset == offset; - } - - } - -} \ No newline at end of file diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Template.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Template.java deleted file mode 100644 index db7db24..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/Template.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
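The StringCache removed above layers a fixed-size, array-based "fast cache" in front of a LIRS cache: small strings live in a power-of-two table indexed by masked hash, and a colliding insert simply overwrites the previous occupant. A minimal sketch of that direct-mapped layer:

    // Sketch of the array-backed FastCache above: the slot index is
    // hash & (SIZE - 1), a colliding put() silently evicts, and a miss
    // falls through to the slower (LIRS) cache in the caller.
    final class DirectMappedCache {

        private static final int SIZE = 16 * 1024; // must be a power of two

        private static final class Entry {
            final long msb, lsb;
            final int offset;
            final String value;

            Entry(long msb, long lsb, int offset, String value) {
                this.msb = msb;
                this.lsb = lsb;
                this.offset = offset;
                this.value = value;
            }
        }

        private final Entry[] slots = new Entry[SIZE];

        String get(int hash, long msb, long lsb, int offset) {
            Entry e = slots[hash & (SIZE - 1)];
            if (e != null && e.msb == msb && e.lsb == lsb && e.offset == offset) {
                return e.value;
            }
            return null;
        }

        void put(int hash, long msb, long lsb, int offset, String value) {
            slots[hash & (SIZE - 1)] = new Entry(msb, lsb, offset, value);
        }
    }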
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkElementIndex; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.MISSING_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.Record.fastEquals; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ID_BYTES; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import com.google.common.base.Objects; -import com.google.common.collect.Lists; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.MemoryChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; - -/** - * The in-memory representation of a "hidden class" of a node; inspired by the - * Chrome V8 Javascript engine). - *
- * Templates are always read fully in-memory. - */ -@Deprecated -public class Template { - - static final short ZERO_CHILD_NODES_TYPE = 0; - - static final short SINGLE_CHILD_NODE_TYPE = 1; - - static final short MANY_CHILD_NODES_TYPE = 2; - - static final String ZERO_CHILD_NODES = null; - - static final String MANY_CHILD_NODES = ""; - - /** - * The {@code jcr:primaryType} property, if present as a single-valued - * {@code NAME} property. Otherwise {@code null}. - */ - @CheckForNull - private final PropertyState primaryType; - - /** - * The {@code jcr:mixinTypes} property, if present as a multi-valued - * {@code NAME} property. Otherwise {@code null}. - */ - @CheckForNull - private final PropertyState mixinTypes; - - /** - * Templates of all the properties of a node, excluding the - * above-mentioned {@code NAME}-valued type properties, if any. - */ - @Nonnull - private final PropertyTemplate[] properties; - - /** - * Name of the single child node, if the node contains just one child. - * Otherwise {@link #ZERO_CHILD_NODES} (i.e. {@code null}) if there are - * no children, or {@link #MANY_CHILD_NODES} if there are more than one. - */ - @CheckForNull - private final String childName; - - Template(PropertyState primaryType, PropertyState mixinTypes, - PropertyTemplate[] properties, String childName) { - this.primaryType = primaryType; - this.mixinTypes = mixinTypes; - if (properties != null) { - this.properties = properties; - Arrays.sort(this.properties); - } else { - this.properties = new PropertyTemplate[0]; - } - this.childName = childName; - } - - Template(NodeState state) { - PropertyState primary = null; - PropertyState mixins = null; - List templates = Lists.newArrayList(); - - for (PropertyState property : state.getProperties()) { - String name = property.getName(); - Type type = property.getType(); - if ("jcr:primaryType".equals(name) && type == Type.NAME) { - primary = property; - } else if ("jcr:mixinTypes".equals(name) && type == Type.NAMES) { - mixins = property; - } else { - templates.add(new PropertyTemplate(property)); - } - } - - this.primaryType = primary; - this.mixinTypes = mixins; - this.properties = - templates.toArray(new PropertyTemplate[templates.size()]); - Arrays.sort(properties); - - long count = state.getChildNodeCount(2); - if (count == 0) { - childName = ZERO_CHILD_NODES; - } else if (count == 1) { - childName = state.getChildNodeNames().iterator().next(); - checkState(childName != null && !childName.equals(MANY_CHILD_NODES)); - } else { - childName = MANY_CHILD_NODES; - } - } - - PropertyState getPrimaryType() { - return primaryType; - } - - PropertyState getMixinTypes() { - return mixinTypes; - } - - PropertyTemplate[] getPropertyTemplates() { - return properties; - } - - /** - * Returns the template of the named property, or {@code null} if no such - * property exists. Use the {@link #getPrimaryType()} and - * {@link #getMixinTypes()} for accessing the JCR type properties, as - * they don't have templates. 
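getPropertyTemplate() below relies on the property templates being kept sorted by name hash: a lookup skips ahead to the run of entries with the matching hash and only calls equals() inside that run. The same idea over a plain String array, under a hypothetical HashOrderedLookup name:

    import java.util.Arrays;
    import java.util.Comparator;

    // Sketch of the hash-ordered scan in getPropertyTemplate(): entries are
    // sorted by hashCode(), so equality checks are confined to the run of
    // entries sharing the target hash.
    final class HashOrderedLookup {

        private final String[] names; // sorted by String.hashCode()

        HashOrderedLookup(String... names) {
            this.names = names.clone();
            Arrays.sort(this.names, Comparator.comparingInt(String::hashCode));
        }

        int indexOf(String name) {
            int hash = name.hashCode();
            int i = 0;
            while (i < names.length && names[i].hashCode() < hash) {
                i++;
            }
            while (i < names.length && names[i].hashCode() == hash) {
                if (name.equals(names[i])) {
                    return i;
                }
                i++;
            }
            return -1;
        }
    }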
- * - * @param name property name - * @return property template, or {@code} null if not found - */ - PropertyTemplate getPropertyTemplate(String name) { - int hash = name.hashCode(); - int index = 0; - while (index < properties.length - && properties[index].getName().hashCode() < hash) { - index++; - } - while (index < properties.length - && properties[index].getName().hashCode() == hash) { - if (name.equals(properties[index].getName())) { - return properties[index]; - } - index++; - } - return null; - } - - String getChildName() { - return childName; - } - - SegmentPropertyState getProperty(RecordId recordId, int index) { - checkElementIndex(index, properties.length); - Segment segment = checkNotNull(recordId).getSegment(); - - int offset = recordId.getOffset() + RECORD_ID_BYTES; - if (childName != ZERO_CHILD_NODES) { - offset += RECORD_ID_BYTES; - } - RecordId rid = null; - if (segment.getSegmentVersion().onOrAfter(V_11)) { - RecordId lid = segment.readRecordId(offset); - ListRecord props = new ListRecord(lid, properties.length); - rid = props.getEntry(index); - } else { - offset += index * RECORD_ID_BYTES; - rid = segment.readRecordId(offset); - } - return new SegmentPropertyState(rid, properties[index]); - } - - MapRecord getChildNodeMap(RecordId recordId) { - checkState(childName != ZERO_CHILD_NODES); - Segment segment = recordId.getSegment(); - int offset = recordId.getOffset() + RECORD_ID_BYTES; - RecordId childNodesId = segment.readRecordId(offset); - return segment.readMap(childNodesId); - } - - @Deprecated - public NodeState getChildNode(String name, RecordId recordId) { - if (childName == ZERO_CHILD_NODES) { - return MISSING_NODE; - } else if (childName == MANY_CHILD_NODES) { - MapRecord map = getChildNodeMap(recordId); - MapEntry child = map.getEntry(name); - if (child != null) { - return child.getNodeState(); - } else { - return MISSING_NODE; - } - } else if (name.equals(childName)) { - Segment segment = recordId.getSegment(); - int offset = recordId.getOffset() + RECORD_ID_BYTES; - RecordId childNodeId = segment.readRecordId(offset); - return new SegmentNodeState(childNodeId); - } else { - return MISSING_NODE; - } - } - - Iterable getChildNodeEntries(RecordId recordId) { - if (childName == ZERO_CHILD_NODES) { - return Collections.emptyList(); - } else if (childName == MANY_CHILD_NODES) { - MapRecord map = getChildNodeMap(recordId); - return map.getEntries(); - } else { - Segment segment = recordId.getSegment(); - int offset = recordId.getOffset() + RECORD_ID_BYTES; - RecordId childNodeId = segment.readRecordId(offset); - return Collections.singletonList(new MemoryChildNodeEntry( - childName, new SegmentNodeState(childNodeId))); - } - } - - @Deprecated - public boolean compare(RecordId thisId, RecordId thatId) { - checkNotNull(thisId); - checkNotNull(thatId); - - // Compare properties - for (int i = 0; i < properties.length; i++) { - PropertyState thisProperty = getProperty(thisId, i); - PropertyState thatProperty = getProperty(thatId, i); - if (!thisProperty.equals(thatProperty)) { - return false; - } - } - - // Compare child nodes - if (childName == ZERO_CHILD_NODES) { - return true; - } else if (childName != MANY_CHILD_NODES) { - NodeState thisChild = getChildNode(childName, thisId); - NodeState thatChild = getChildNode(childName, thatId); - return thisChild.equals(thatChild); - } else { - // TODO: Leverage the HAMT data structure for the comparison - MapRecord thisMap = getChildNodeMap(thisId); - MapRecord thatMap = getChildNodeMap(thatId); - if (fastEquals(thisMap, 
thatMap)) { - return true; // shortcut - } else if (thisMap.size() != thatMap.size()) { - return false; // shortcut - } else { - // TODO: can this be optimized? - for (MapEntry entry : thisMap.getEntries()) { - String name = entry.getName(); - MapEntry thatEntry = thatMap.getEntry(name); - if (thatEntry == null) { - return false; - } else if (!entry.getNodeState().equals(thatEntry.getNodeState())) { - return false; - } - } - return true; - } - } - } - - //------------------------------------------------------------< Object >-- - - @Override - @Deprecated - public boolean equals(Object object) { - if (this == object) { - return true; - } else if (object instanceof Template) { - Template that = (Template) object; - return Objects.equal(primaryType, that.primaryType) - && Objects.equal(mixinTypes, that.mixinTypes) - && Arrays.equals(properties, that.properties) - && Objects.equal(childName, that.childName); - } else { - return false; - } - } - - @Override - @Deprecated - public int hashCode() { - return Objects.hashCode(primaryType, mixinTypes, - Arrays.asList(properties), getTemplateType(), childName); - } - - @Override - @Deprecated - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("{ "); - if (primaryType != null) { - builder.append(primaryType); - builder.append(", "); - } - if (mixinTypes != null) { - builder.append(mixinTypes); - builder.append(", "); - } - for (int i = 0; i < properties.length; i++) { - builder.append(properties[i]); - builder.append(" = ?, "); - } - if (childName == ZERO_CHILD_NODES) { - builder.append(""); - } else if (childName == MANY_CHILD_NODES) { - builder.append(""); - } else { - builder.append(childName + " = "); - } - builder.append(" }"); - return builder.toString(); - } - - short getTemplateType() { - if (childName == ZERO_CHILD_NODES) { - return ZERO_CHILD_NODES_TYPE; - } else if (childName == MANY_CHILD_NODES) { - return MANY_CHILD_NODES_TYPE; - } else { - return SINGLE_CHILD_NODE_TYPE; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java deleted file mode 100644 index 99fd288..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategy.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
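Template.compare() above checks the child-node maps in increasingly expensive steps: a record-level fastEquals shortcut, a size shortcut, and only then an entry-by-entry comparison. A generic sketch of that pattern, assuming non-null map values:

    import java.util.Map;

    // Sketch of the staged comparison in Template.compare(): identity first
    // (like fastEquals on the underlying records), then sizes, then entries.
    final class MapCompare {

        static <K, V> boolean sameEntries(Map<K, V> a, Map<K, V> b) {
            if (a == b) {
                return true;  // cheap shortcut
            }
            if (a.size() != b.size()) {
                return false; // differing sizes can never match
            }
            for (Map.Entry<K, V> e : a.entrySet()) {
                V other = b.get(e.getKey());
                // assumes non-null values, as in the node-state maps above
                if (other == null || !other.equals(e.getValue())) {
                    return false;
                }
            }
            return true;
        }
    }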
- */ -package org.apache.jackrabbit.oak.plugins.segment.compaction; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static java.lang.System.currentTimeMillis; - -import java.util.concurrent.Callable; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public abstract class CompactionStrategy { - private static final Logger LOG = LoggerFactory.getLogger(CompactionStrategy.class); - - @Deprecated - public enum CleanupType { - - /** - * {@code CLEAN_ALL} must be used in conjunction with {@code cloneBinaries} - * otherwise segments can go away ({@code SegmentNotFoundException}) - *
- * Pros: best compaction results - *
- * Cons: larger repo size during compaction (2x). High chances that a currently - * running diff (e.g. observation) fails with {@code SegmentNotFoundException}. - */ - @Deprecated - CLEAN_ALL, - - @Deprecated - CLEAN_NONE, - - /** - * {@code CLEAN_OLD} with {@code cloneBinaries} - *
- * Pros: better compaction results - *
- * Cons: larger repo size {@code during} compaction (2x). {@code SegmentNotFoundException} - * with insufficiently large values for {@code olderThan}. - *
- * {@code CLEAN_OLD} without {@code cloneBinaries} - *
- * Pros: weakest compaction results, smaller size during compaction (1x + size of - * data-segments). - *
- * Cons: {@code SegmentNotFoundException} with insufficiently large values for - * {@code olderThan}. - */ - @Deprecated - CLEAN_OLD - } - - @Deprecated - public static final boolean PAUSE_DEFAULT = true; - - @Deprecated - public static final boolean CLONE_BINARIES_DEFAULT = false; - - @Deprecated - public static final CleanupType CLEANUP_DEFAULT = CleanupType.CLEAN_OLD; - - @Deprecated - public static final long TIMESTAMP_DEFAULT = 1000 * 60 * 60 * 10; // 10h - - @Deprecated - public static final byte MEMORY_THRESHOLD_DEFAULT = 5; - - @Deprecated - public static final boolean PERSIST_COMPACTION_MAP_DEFAULT = true; - - @Deprecated - public static final byte GAIN_THRESHOLD_DEFAULT = 10; - - /** - * Default value for {@link #getRetryCount()} - */ - @Deprecated - public static final int RETRY_COUNT_DEFAULT = 5; - - /** - * Default value for {@link #getForceAfterFail()} - */ - @Deprecated - public static final boolean FORCE_AFTER_FAIL_DEFAULT = false; - - /** - * No compaction at all - */ - @Deprecated - public static final CompactionStrategy NO_COMPACTION = new CompactionStrategy( - true, false, CleanupType.CLEAN_NONE, 0, MEMORY_THRESHOLD_DEFAULT) { - @Override - public boolean compacted(@Nonnull Callable setHead) throws Exception { - return false; - } - }; - - private boolean paused; - - private boolean cloneBinaries; - - @Nonnull - private CleanupType cleanupType; - - /** - * anything that has a lifetime bigger than this will be removed. a value of - * 0 (or very small) acts like a CLEANUP.NONE, a value of -1 (or negative) - * acts like a CLEANUP.ALL - * - */ - private long olderThan; - - private byte memoryThreshold = MEMORY_THRESHOLD_DEFAULT; - - private boolean persistedCompactionMap = PERSIST_COMPACTION_MAP_DEFAULT; - - private int retryCount = RETRY_COUNT_DEFAULT; - - private boolean forceAfterFail = FORCE_AFTER_FAIL_DEFAULT; - - private long compactionStart = currentTimeMillis(); - - /** - * Compaction gain estimate threshold beyond which compaction should run - */ - private byte gainThreshold = GAIN_THRESHOLD_DEFAULT; - - /** - * Flag that allows turning on an optimized version of the compaction - * process in the case of offline compaction - */ - private boolean offlineCompaction = false; - - @Deprecated - protected CompactionStrategy(boolean paused, - boolean cloneBinaries, @Nonnull CleanupType cleanupType, long olderThan, byte memoryThreshold) { - checkArgument(olderThan >= 0); - this.paused = paused; - this.cloneBinaries = cloneBinaries; - this.cleanupType = checkNotNull(cleanupType); - this.olderThan = olderThan; - this.memoryThreshold = memoryThreshold; - } - - @Deprecated - public boolean canRemove(SegmentId id) { - switch (cleanupType) { - case CLEAN_ALL: - return true; - case CLEAN_NONE: - return false; - case CLEAN_OLD: - long age = compactionStart - id.getCreationTime(); - if (age > olderThan) { - LOG.info("TarMK released segment {} for gc. 
Age={}", id, age); - return true; - } else { - return false; - } - } - return false; - } - - @Deprecated - public boolean cloneBinaries() { - return cloneBinaries; - } - - @Deprecated - public boolean isPaused() { - return paused; - } - - @Deprecated - public void setPaused(boolean paused) { - this.paused = paused; - } - - @Deprecated - public void setCloneBinaries(boolean cloneBinaries) { - this.cloneBinaries = cloneBinaries; - } - - @Deprecated - public void setCleanupType(@Nonnull CleanupType cleanupType) { - this.cleanupType = checkNotNull(cleanupType); - } - - @Deprecated - public void setOlderThan(long olderThan) { - checkArgument(olderThan >= 0); - this.olderThan = olderThan; - } - - String getCleanupType() { - return cleanupType.toString(); - } - - long getOlderThan() { - return olderThan; - } - - - @Override - @Deprecated - public String toString() { - return "CompactionStrategy{" + - "paused=" + paused + - ", cloneBinaries=" + cloneBinaries + - ", cleanupType=" + cleanupType + - ", olderThan=" + olderThan + - ", memoryThreshold=" + memoryThreshold + - ", persistedCompactionMap=" + persistedCompactionMap + - ", retryCount=" + retryCount + - ", forceAfterFail=" + forceAfterFail + - ", compactionStart=" + compactionStart + - ", offlineCompaction=" + offlineCompaction + - '}'; - } - - @Deprecated - public void setCompactionStart(long ms) { - this.compactionStart = ms; - } - - @Deprecated - public byte getMemoryThreshold() { - return memoryThreshold; - } - - @Deprecated - public void setMemoryThreshold(byte memoryThreshold) { - this.memoryThreshold = memoryThreshold; - } - - @Deprecated - public boolean getPersistCompactionMap() { - return persistedCompactionMap; - } - - @Deprecated - public void setPersistCompactionMap(boolean persist) { - persistedCompactionMap = persist; - } - - /** - * Get whether or not to force compact concurrent commits on top of already - * compacted commits after the maximum number of retries has been reached. - * Force committing tries to exclusively write lock the node store. - * @return {@code true} if force commit is on, {@code false} otherwise - */ - @Deprecated - public boolean getForceAfterFail() { - return forceAfterFail; - } - - /** - * Set whether or not to force compact concurrent commits on top of already - * compacted commits after the maximum number of retries has been reached. - * Force committing tries to exclusively write lock the node store. 
- * @param forceAfterFail - */ - @Deprecated - public void setForceAfterFail(boolean forceAfterFail) { - this.forceAfterFail = forceAfterFail; - } - - /** - * Get the number of tries to compact concurrent commits on top of already - * compacted commits - * @return retry count - */ - @Deprecated - public int getRetryCount() { - return retryCount; - } - - /** - * Set the number of tries to compact concurrent commits on top of already - * compacted commits - * @param retryCount - */ - @Deprecated - public void setRetryCount(int retryCount) { - this.retryCount = retryCount; - } - - /** - * Get the compaction gain estimate threshold beyond which compaction should - * run - * @return gainThreshold - */ - @Deprecated - public byte getGainThreshold() { - return gainThreshold; - } - - /** - * Set the compaction gain estimate threshold beyond which compaction should - * run - * @param gainThreshold - */ - @Deprecated - public void setGainThreshold(byte gainThreshold) { - this.gainThreshold = gainThreshold; - } - - @Deprecated - public abstract boolean compacted(@Nonnull Callable setHead) throws Exception; - - @Deprecated - public boolean isOfflineCompaction() { - return offlineCompaction; - } - - @Deprecated - public void setOfflineCompaction(boolean offlineCompaction) { - this.offlineCompaction = offlineCompaction; - } - - /** - * Check if the approximate repository size is getting too big compared with - * the available space on disk. - * - * @param repositoryDiskSpace Approximate size of the disk space occupied by - * the repository. - * @param availableDiskSpace Currently available disk space. - * @return {@code true} if the available disk space is considered enough for - * normal repository operations. - */ - @Deprecated - public boolean isDiskSpaceSufficient(long repositoryDiskSpace, long availableDiskSpace) { - return availableDiskSpace > 0.25 * repositoryDiskSpace; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategyMBean.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategyMBean.java deleted file mode 100644 index f125b2a..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/CompactionStrategyMBean.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
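isDiskSpaceSufficient() above encodes a simple heuristic: the free disk space must stay above a quarter of the current repository size, presumably because compaction can temporarily double the space used (per the 2x notes in the CleanupType javadoc). A small worked sketch:

    // Sketch of the threshold check in isDiskSpaceSufficient(): free space
    // must exceed 25% of the approximate repository size.
    final class DiskSpaceCheck {

        static boolean sufficient(long repositoryBytes, long availableBytes) {
            return availableBytes > 0.25 * repositoryBytes;
        }

        public static void main(String[] args) {
            // 100 GiB repository: 30 GiB free passes, 20 GiB free does not.
            System.out.println(sufficient(100L << 30, 30L << 30)); // true
            System.out.println(sufficient(100L << 30, 20L << 30)); // false
        }
    }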
- */ - -package org.apache.jackrabbit.oak.plugins.segment.compaction; - -@Deprecated -public interface CompactionStrategyMBean { - - @Deprecated - String TYPE = "CompactionStrategy"; - - @Deprecated - boolean isCloneBinaries(); - - @Deprecated - void setCloneBinaries(boolean cloneBinaries); - - @Deprecated - boolean isPausedCompaction(); - - @Deprecated - void setPausedCompaction(boolean pausedCompaction); - - @Deprecated - String getCleanupStrategy(); - - @Deprecated - void setCleanupStrategy(String cleanup); - - @Deprecated - long getOlderThan(); - - @Deprecated - void setOlderThan(long olderThan); - - @Deprecated - byte getMemoryThreshold(); - - @Deprecated - void setMemoryThreshold(byte memory); - - /** - * Get whether or not to force compact concurrent commits on top of already - * compacted commits after the maximum number of retries has been reached. - * Force committing tries to exclusively write lock the node store. - * @return {@code true} if force commit is on, {@code false} otherwise - */ - @Deprecated - boolean getForceAfterFail(); - - /** - * Set whether or not to force compact concurrent commits on top of already - * compacted commits after the maximum number of retries has been reached. - * Force committing tries to exclusively write lock the node store. - * @param forceAfterFail - */ - @Deprecated - void setForceAfterFail(boolean forceAfterFail); - - /** - * Get the number of tries to compact concurrent commits on top of already - * compacted commits - * @return retry count - */ - @Deprecated - int getRetryCount(); - - /** - * Set the number of tries to compact concurrent commits on top of already - * compacted commits - * @param retryCount - */ - @Deprecated - void setRetryCount(int retryCount); - - /** - * Get the compaction gain estimate threshold beyond which compaction should - * run - * @return gainThreshold - */ - @Deprecated - byte getGainThreshold(); - - /** - * Set the compaction gain estimate threshold beyond which compaction should - * run - * @param gainThreshold - */ - @Deprecated - void setGainThreshold(byte gainThreshold); - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/DefaultCompactionStrategyMBean.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/DefaultCompactionStrategyMBean.java deleted file mode 100644 index 576e9d2..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/compaction/DefaultCompactionStrategyMBean.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
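Aside, not part of the diff: CompactionStrategyMBean above is a plain JMX standard MBean interface. For orientation only, a minimal sketch of how such an interface can be exposed through the platform MBean server; all names here are invented, and this is not how Oak itself wires the bean up:

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

class JmxRegistrationDemo {

    // standard MBean naming convention: interface = class name + "MBean"
    public interface DemoMBean {
        int getRetryCount();
    }

    public static class Demo implements DemoMBean {
        @Override
        public int getRetryCount() {
            return 5;
        }
    }

    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("org.example:type=CompactionStrategy"); // invented name
        server.registerMBean(new Demo(), name); // attribute now visible in e.g. JConsole
    }
}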
- */ - -package org.apache.jackrabbit.oak.plugins.segment.compaction; - -import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType; - -@Deprecated -public class DefaultCompactionStrategyMBean - extends AnnotatedStandardMBean - implements CompactionStrategyMBean { - - private final CompactionStrategy strategy; - - @Deprecated - public DefaultCompactionStrategyMBean(CompactionStrategy strategy) { - super(CompactionStrategyMBean.class); - this.strategy = strategy; - } - - @Override - @Deprecated - public boolean isCloneBinaries() { - return strategy.cloneBinaries(); - } - - @Override - @Deprecated - public void setCloneBinaries(boolean cloneBinaries) { - strategy.setCloneBinaries(cloneBinaries); - } - - @Override - @Deprecated - public boolean isPausedCompaction() { - return strategy.isPaused(); - } - - @Override - @Deprecated - public void setPausedCompaction(boolean pausedCompaction) { - strategy.setPaused(pausedCompaction); - } - - @Override - @Deprecated - public String getCleanupStrategy() { - return strategy.getCleanupType(); - } - - @Override - @Deprecated - public void setCleanupStrategy(String cleanup) { - strategy.setCleanupType(CleanupType.valueOf(cleanup)); - } - - @Override - @Deprecated - public long getOlderThan() { - return strategy.getOlderThan(); - } - - @Override - @Deprecated - public void setOlderThan(long olderThan) { - strategy.setOlderThan(olderThan); - } - - @Override - @Deprecated - public byte getMemoryThreshold() { - return strategy.getMemoryThreshold(); - } - - @Override - @Deprecated - public void setMemoryThreshold(byte memory) { - strategy.setMemoryThreshold(memory); - } - - @Override - @Deprecated - public boolean getForceAfterFail() { - return strategy.getForceAfterFail(); - } - - @Override - @Deprecated - public void setForceAfterFail(boolean forceAfterFail) { - strategy.setForceAfterFail(forceAfterFail); - } - - @Override - @Deprecated - public int getRetryCount() { - return strategy.getRetryCount(); - } - - @Override - @Deprecated - public void setRetryCount(int retryCount) { - strategy.setRetryCount(retryCount); - } - - @Override - @Deprecated - public byte getGainThreshold() { - return strategy.getGainThreshold(); - } - - @Override - @Deprecated - public void setGainThreshold(byte gainThreshold) { - strategy.setGainThreshold(gainThreshold); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/BackgroundThread.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/BackgroundThread.java deleted file mode 100644 index d79615e..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/BackgroundThread.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
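Aside, not part of the diff: the BackgroundThread class whose deleted body follows just below repeatedly re-invokes a single-step Runnable on a fixed interval from a low-priority daemon thread, recording per-iteration timings. A minimal sketch of the same scheduling pattern on java.util.concurrent (class and names invented, timing statistics omitted):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PeriodicRunner implements AutoCloseable {

    private final ScheduledExecutorService scheduler;

    PeriodicRunner(String name, long intervalMillis, Runnable task) {
        scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, name);
            t.setDaemon(true);                  // do not block JVM shutdown
            t.setPriority(Thread.MIN_PRIORITY); // background work only
            return t;
        });
        // as with BackgroundThread, the task performs one unit of work per
        // call and is re-invoked here rather than looping itself
        scheduler.scheduleWithFixedDelay(task, intervalMillis, intervalMillis,
                TimeUnit.MILLISECONDS);
    }

    @Override
    public void close() throws InterruptedException {
        scheduler.shutdown();
        scheduler.awaitTermination(1, TimeUnit.MINUTES);
    }
}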
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static java.lang.System.currentTimeMillis; - -import java.io.Closeable; -import java.util.Date; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A small wrapper around the Thread class that periodically calls a runnable. - * Please note the Runnable.run() method is not supposed to loop itself, instead - * it should just do one operation. This class calls Runnable.run() repeatedly. - * This class also measures and logs the time taken by the Runnable.run() - * method. - */ -class BackgroundThread extends Thread implements Closeable { - - /** Logger instance */ - private static final Logger log = - LoggerFactory.getLogger(BackgroundThread.class); - - private final String name; - - private final long interval; - - private boolean alive = true; - - private long iterations = 0; - - private long sumDuration = 0; - - private long maxDuration = 0; - - private BackgroundThread(String name, long interval, Runnable target) { - super(target, name); - - this.name = name; - this.interval = interval; - - setDaemon(true); - setPriority(MIN_PRIORITY); - } - - /** - * Run a {@code task} in an regular {@code interval} in the background - * (i.e. {@link Thread#MIN_PRIORITY}. - * @param name name of the thread - * @param interval interval in milliseconds - * @param task task to run - * @return the {@code BackgroundThread} instance running {@code task}. - */ - public static BackgroundThread run(String name, long interval, Runnable task) { - BackgroundThread t = new BackgroundThread(name, interval, task); - t.start(); - return t; - } - - @Override - public void run() { - try { - while (waitUntilNextIteration()) { - setName(name + ", active since " + new Date() - + ", previous max duration " + maxDuration + "ms"); - - long start = currentTimeMillis(); - super.run(); - long duration = currentTimeMillis() - start; - - iterations++; - sumDuration += duration; - maxDuration = Math.max(maxDuration, duration); - - // make execution statistics visible in thread dumps - setName(name - + ", avg " + (sumDuration / iterations) + "ms" - + ", max " + maxDuration + "ms"); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - log.error(name + " interrupted", e); - } catch (RuntimeException e) { - log.error("Unhandled exception in background thread", e); - throw e; - } - } - - void trigger() { - trigger(false); - } - - @Override - public void close() { - try { - trigger(true); - join(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - log.error(name + " join interrupted", e); - } - } - - private synchronized void trigger(boolean close) { - if (close) { - alive = false; - } - notify(); - } - - private synchronized boolean waitUntilNextIteration() - throws InterruptedException { - if (alive) { - if (interval < 0) { - wait(); - } else { - wait(interval); - } - } - return alive; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionGainEstimate.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionGainEstimate.java deleted file mode 100644 index 8ef7cba..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionGainEstimate.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license 
agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.apache.jackrabbit.oak.api.Type.BINARIES; - -import java.io.File; -import java.util.UUID; - -import com.google.common.base.Supplier; -import com.google.common.hash.BloomFilter; -import com.google.common.hash.Funnel; -import com.google.common.hash.PrimitiveSink; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.plugins.segment.RecordIdSet; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentPropertyState; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; - -class CompactionGainEstimate implements TarEntryVisitor { - - private static final Funnel UUID_FUNNEL = new Funnel() { - @Override - public void funnel(UUID from, PrimitiveSink into) { - into.putLong(from.getMostSignificantBits()); - into.putLong(from.getLeastSignificantBits()); - } - }; - - private final BloomFilter uuids; - - private long totalSize = 0; - - private long reachableSize = 0; - - /** - * Create a new instance of gain estimator. The estimation process can be stopped - * by switching the supplier {@code stop} to {@code true}, in which case the returned - * estimates are undefined. - * - * @param node root node state - * @param estimatedBulkCount - * @param stop stop signal - */ - CompactionGainEstimate(SegmentNodeState node, int estimatedBulkCount, Supplier stop) { - uuids = BloomFilter.create(UUID_FUNNEL, estimatedBulkCount); - collectReferencedSegments(node, new RecordIdSet(), stop); - } - - private void collectReferencedSegments(SegmentNodeState node, RecordIdSet visited, Supplier stop) { - if (!stop.get() && visited.addIfNotPresent(node.getRecordId())) { - collectUUID(node.getRecordId().getSegmentId()); - for (PropertyState property : node.getProperties()) { - if (property instanceof SegmentPropertyState) { - collectUUID(((SegmentPropertyState) property) - .getRecordId().getSegmentId()); - } - - // Get the underlying value as stream so we can collect - // the segments ids involved in storing the value. - // This works as primitives are stored as strings and strings - // as binaries of their UTF-8 encoding. 
- for (Blob blob : property.getValue(BINARIES)) { - for (SegmentId id : SegmentBlob.getBulkSegmentIds(blob)) { - collectUUID(id); - } - } - } - for (ChildNodeEntry child : node.getChildNodeEntries()) { - collectReferencedSegments((SegmentNodeState) child.getNodeState(), - visited, stop); - } - } - } - - private void collectUUID(SegmentId segmentId) { - uuids.put(new UUID( - segmentId.getMostSignificantBits(), - segmentId.getLeastSignificantBits())); - } - - /** - * Returns a percentage estimate (scale 0-100) for how much disk space - * running compaction (and cleanup) could potentially release. - * - * @param offset number of bytes to offset the reachable size with - * @return percentage of disk space that could be freed with compaction - */ - public long estimateCompactionGain(long offset) { - if (totalSize == 0) { - return 0; - } - return 100 * (totalSize - reachableSize - offset) / totalSize; - } - - public long getTotalSize() { - return totalSize; - } - - public long getReachableSize() { - return reachableSize; - } - - // ---------------------------------------------------< TarEntryVisitor >-- - - @Override - public void visit(long msb, long lsb, File file, int offset, int size) { - UUID uuid = new UUID(msb, lsb); - int entrySize = TarReader.getEntrySize(size); - totalSize += entrySize; - if (uuids.mightContain(uuid)) { - reachableSize += entrySize; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileAccess.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileAccess.java deleted file mode 100644 index e42c0f5..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileAccess.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Preconditions.checkState; -import static java.nio.channels.FileChannel.MapMode.READ_ONLY; - -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.MappedByteBuffer; - -/** - * A wrapper around either memory mapped files or random access files, to allow - * reading from a file. - */ -abstract class FileAccess { - - abstract boolean isMemoryMapped(); - - abstract int length() throws IOException; - - abstract ByteBuffer read(int position, int length) throws IOException; - - abstract void close() throws IOException; - - //-----------------------------------------------------------< private >-- - - /** - * The implementation that uses memory mapped files. 
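Aside, not part of the diff: CompactionGainEstimate above feeds the UUIDs of reachable segments into a Guava BloomFilter, then sums total bytes and (probably) reachable bytes per tar entry, and reports 100 * (totalSize - reachableSize - offset) / totalSize. A compressed, runnable restatement under invented sizes; a Bloom filter can over-count reachable bytes but never under-counts, so the gain estimate errs on the low side:

import java.util.UUID;

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnel;
import com.google.common.hash.PrimitiveSink;

class GainSketch {

    private static final Funnel<UUID> UUID_FUNNEL = new Funnel<UUID>() {
        @Override
        public void funnel(UUID from, PrimitiveSink into) {
            into.putLong(from.getMostSignificantBits());
            into.putLong(from.getLeastSignificantBits());
        }
    };

    public static void main(String[] args) {
        // 1. reachable segment ids collected from the head state go in here
        BloomFilter<UUID> uuids = BloomFilter.create(UUID_FUNNEL, 1000);
        UUID live = UUID.randomUUID();
        uuids.put(live);

        // 2. one pass over the tar entries sums the byte counts
        long totalSize = 0;
        long reachableSize = 0;
        long entrySize = 256 * 1024; // invented entry size
        totalSize += entrySize;
        if (uuids.mightContain(live)) {
            reachableSize += entrySize;
        }

        // 3. percentage of disk space compaction could reclaim
        long offset = 0; // bytes discounted up front, e.g. for a compaction map
        long gain = totalSize == 0 ? 0
                : 100 * (totalSize - reachableSize - offset) / totalSize;
        System.out.println("estimated gain: " + gain + "%");
    }
}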
- */ - static class Mapped extends FileAccess { - - private final RandomAccessFile file; - - private MappedByteBuffer buffer; - - Mapped(RandomAccessFile file) throws IOException { - this.file = file; - this.buffer = file.getChannel().map(READ_ONLY, 0, file.length()); - } - - @Override - boolean isMemoryMapped() { - return true; - } - - @Override - public int length() { - return buffer.remaining(); - } - - @Override - public ByteBuffer read(int position, int length) { - ByteBuffer entry = buffer.asReadOnlyBuffer(); - entry.position(entry.position() + position); - entry.limit(entry.position() + length); - return entry.slice(); - } - - @Override - public void close() throws IOException { - buffer = null; - file.close(); - } - - } - - /** - * The implementation that uses random access file (reads are synchronized). - */ - static class Random extends FileAccess { - - private final RandomAccessFile file; - - Random(RandomAccessFile file) { - this.file = file; - } - - @Override - boolean isMemoryMapped() { - return false; - } - - @Override - public synchronized int length() throws IOException { - long length = file.length(); - checkState(length < Integer.MAX_VALUE); - return (int) length; - } - - @Override - public synchronized ByteBuffer read(int position, int length) - throws IOException { - ByteBuffer entry = ByteBuffer.allocate(length); - file.seek(position); - file.readFully(entry.array()); - return entry; - } - - @Override - public synchronized void close() throws IOException { - file.close(); - } - - } - -} \ No newline at end of file diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java deleted file mode 100644 index f5c0118..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStore.java +++ /dev/null @@ -1,1618 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
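Aside, not part of the diff: FileAccess.Mapped above maps the whole tar file read-only once and serves every read as a zero-copy slice of that single mapping. A self-contained sketch of that core idea (file name and offsets invented; the file must exist and hold at least 192 bytes):

import static java.nio.channels.FileChannel.MapMode.READ_ONLY;

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;

class MappedReadDemo {

    public static void main(String[] args) throws Exception {
        try (RandomAccessFile file = new RandomAccessFile("data00000a.tar", "r")) {
            // map the whole file once, read-only
            MappedByteBuffer buffer = file.getChannel().map(READ_ONLY, 0, file.length());
            // serve a read as a slice of the mapping, no copying involved
            ByteBuffer entry = buffer.asReadOnlyBuffer();
            entry.position(64);                  // entry offset within the file
            entry.limit(entry.position() + 128); // entry length
            ByteBuffer slice = entry.slice();
            System.out.println(slice.remaining()); // 128
        }
    }
}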
- */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Lists.newLinkedList; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newLinkedHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.String.format; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.MINUTES; -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.CompactionMap.sum; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.NO_COMPACTION; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileLock; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import javax.annotation.Nonnull; - -import com.google.common.base.Stopwatch; -import com.google.common.base.Supplier; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob; -import org.apache.jackrabbit.oak.plugins.segment.CompactionMap; -import org.apache.jackrabbit.oak.plugins.segment.Compactor; -import org.apache.jackrabbit.oak.plugins.segment.PersistedCompactionMap; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.SegmentGraphVisitor; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTracker; -import org.apache.jackrabbit.oak.plugins.segment.SegmentVersion; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The storage implementation for tar files. 
- */ -@Deprecated -public class FileStore implements SegmentStore { - - /** Logger instance */ - private static final Logger log = LoggerFactory.getLogger(FileStore.class); - - private static final int MB = 1024 * 1024; - - private static final Pattern FILE_NAME_PATTERN = - Pattern.compile("(data|bulk)((0|[1-9][0-9]*)[0-9]{4})([a-z])?.tar"); - - private static final String FILE_NAME_FORMAT = "data%05d%s.tar"; - - private static final String JOURNAL_FILE_NAME = "journal.log"; - - private static final String LOCK_FILE_NAME = "repo.lock"; - - private static final String MANIFEST_FILE_NAME = "manifest"; - - /** - * GC counter for logging purposes - */ - private static final AtomicLong gcCount = new AtomicLong(0); - - static final boolean MEMORY_MAPPING_DEFAULT = - "64".equals(System.getProperty("sun.arch.data.model", "32")); - - private final SegmentTracker tracker; - - private final File directory; - - private final BlobStore blobStore; - - private final int maxFileSize; - - private final boolean memoryMapping; - - private volatile List readers; - - private int writeNumber; - - private File writeFile; - - private TarWriter writer; - - private final RandomAccessFile journalFile; - - private final RandomAccessFile lockFile; - - private final FileLock lock; - - /** - * The latest head state. - */ - private final AtomicReference head; - - /** - * The persisted head of the root journal, used to determine whether the - * latest {@link #head} value should be written to the disk. - */ - private final AtomicReference persistedHead; - - /** - * The background flush thread. Automatically flushes the TarMK state - * once every five seconds. - */ - private final BackgroundThread flushThread; - - /** - * The background compaction thread. Compacts the TarMK contents whenever - * triggered by the {@link #gc()} method. - */ - private final BackgroundThread compactionThread; - - /** - * This background thread periodically asks the {@code CompactionStrategy} - * to compare the approximate size of the repository with the available disk - * space. The result of this comparison is stored in the state of this - * {@code FileStore}. - */ - private final BackgroundThread diskSpaceThread; - - private CompactionStrategy compactionStrategy = NO_COMPACTION; - - /** - * Flag to request revision cleanup during the next flush. - */ - private final AtomicBoolean cleanupNeeded = new AtomicBoolean(false); - - /** - * List of old tar file generations that are waiting to be removed. They can - * not be removed immediately, because they first need to be closed, and the - * JVM needs to release the memory mapped file references. - */ - private final List pendingRemove = newLinkedList(); - - /** - * Version of the segment storage format. - */ - private final SegmentVersion version; - - /** - * {@code GCMonitor} monitoring this instance's gc progress - */ - private final GCMonitor gcMonitor; - - /** - * Represents the approximate size on disk of the repository. - */ - private final AtomicLong approximateSize; - - /** - * This flag is periodically updated by calling the {@code - * CompactionStrategy} at regular intervals. - */ - private final AtomicBoolean sufficientDiskSpace; - - /** - * Flag signalling shutdown of the file store - */ - private volatile boolean shutdown; - - private final ReadWriteLock fileStoreLock = new ReentrantReadWriteLock(); - - private final FileStoreStats stats; - - /** - * Create a new instance of a {@link Builder} for a file store. 
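Aside, not part of the diff: for orientation, typical construction of the FileStore deleted here through the builder described below, using only methods that appear in the removed code (the directory path is invented and the old oak-segment artifact is assumed on the classpath):

import java.io.File;

import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;

class FileStoreUsageDemo {

    public static void main(String[] args) throws Exception {
        // build() may throw IOException or InvalidFileStoreVersionException
        FileStore store = FileStore.builder(new File("repository/segmentstore"))
                .withMaxFileSize(256)       // tar generation size cap, in MB
                .withDefaultMemoryMapping() // mmap on 64 bit JVMs, off otherwise
                .build();
        try {
            System.out.println(store.getHead()); // current root SegmentNodeState
        } finally {
            store.close();
        }
    }
}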
- * @param directory directory where the tar files are stored - * @return a new {@link Builder} instance. - */ - @Nonnull - @Deprecated - public static Builder builder(@Nonnull File directory) { - return new Builder(checkNotNull(directory)); - } - - /** - * Builder for creating {@link FileStore} instances. - */ - @Deprecated - public static class Builder { - - private final File directory; - - private BlobStore blobStore; // null -> store blobs inline - - private NodeState root = EMPTY_NODE; - - private int maxFileSize = 256; - - private int cacheSize; // 0 -> DEFAULT_MEMORY_CACHE_SIZE - - private boolean memoryMapping; - - private final LoggingGCMonitor gcMonitor = new LoggingGCMonitor(); - - private StatisticsProvider statsProvider = StatisticsProvider.NOOP; - - private SegmentVersion version = SegmentVersion.LATEST_VERSION; - - private Builder(File directory) { - this.directory = directory; - } - - /** - * Specify the {@link BlobStore}. - * @param blobStore - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withBlobStore(@Nonnull BlobStore blobStore) { - this.blobStore = checkNotNull(blobStore); - return this; - } - - /** - * Specify the initial root node state for the file store - * @param root - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withRoot(@Nonnull NodeState root) { - this.root = checkNotNull(root); - return this; - } - - /** - * Maximal size of the generated tar files in MB. - * @param maxFileSize - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withMaxFileSize(int maxFileSize) { - this.maxFileSize = maxFileSize; - return this; - } - - /** - * Size of the cache in MB. - * @param cacheSize - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withCacheSize(int cacheSize) { - this.cacheSize = cacheSize; - return this; - } - - /** - * Turn caching off - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withNoCache() { - this.cacheSize = -1; - return this; - } - - /** - * Turn memory mapping on or off - * @param memoryMapping - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withMemoryMapping(boolean memoryMapping) { - this.memoryMapping = memoryMapping; - return this; - } - - /** - * Set memory mapping to the default value based on OS properties - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withDefaultMemoryMapping() { - this.memoryMapping = MEMORY_MAPPING_DEFAULT; - return this; - } - - /** - * {@link GCMonitor} for monitoring this files store's gc process. - * @param gcMonitor - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withGCMonitor(@Nonnull GCMonitor gcMonitor) { - this.gcMonitor.delegatee = checkNotNull(gcMonitor); - return this; - } - - /** - * {@link StatisticsProvider} for collecting statistics related to FileStore - * @param statisticsProvider - * @return this instance - */ - @Nonnull - @Deprecated - public Builder withStatisticsProvider(@Nonnull StatisticsProvider statisticsProvider) { - this.statsProvider = checkNotNull(statisticsProvider); - return this; - } - - @Deprecated - public Builder withSegmentVersion(SegmentVersion version) { - this.version = checkNotNull(version); - return this; - } - - /** - * Create a new {@link FileStore} instance with the settings specified in this - * builder. If none of the {@code with} methods have been called before calling - * this method, a file store with the following default settings is returned: - *
- * <ul>
- * <li>blob store: inline</li>
- * <li>root: empty node</li>
- * <li>max file size: 256MB</li>
- * <li>cache size: 256MB</li>
- * <li>memory mapping: on for 64 bit JVMs off otherwise</li>
- * <li>whiteboard: none. No {@link GCMonitor} tracking</li>
- * <li>statsProvider: StatisticsProvider.NOOP</li>
- * </ul>
- * - * @return a new file store instance - * @throws IOException - */ - @Nonnull - @Deprecated - public FileStore build() throws IOException, InvalidFileStoreVersionException { - return new FileStore(this, false); - } - - @Deprecated - public ReadOnlyStore buildReadOnly() throws IOException, InvalidFileStoreVersionException { - return new ReadOnlyStore(this); - } - - } - - private FileStore(Builder builder, boolean readOnly) throws IOException, InvalidFileStoreVersionException { - this.version = builder.version; - - if (readOnly) { - checkNotNull(builder.directory); - checkState(builder.directory.exists() && builder.directory.isDirectory()); - } else { - checkNotNull(builder.directory).mkdirs(); - } - - if (builder.cacheSize < 0) { - this.tracker = new SegmentTracker(this, 0, version); - } else if (builder.cacheSize > 0) { - this.tracker = new SegmentTracker(this, builder.cacheSize, version); - } else { - this.tracker = new SegmentTracker(this, version); - } - this.blobStore = builder.blobStore; - this.directory = builder.directory; - this.maxFileSize = builder.maxFileSize * MB; - this.memoryMapping = builder.memoryMapping; - this.gcMonitor = builder.gcMonitor; - - Map> map = collectFiles(directory); - - File manifest = new File(directory, MANIFEST_FILE_NAME); - - if (map.size() > 0) { - if (manifest.exists()) { - throw new InvalidFileStoreVersionException(); - } else { - log.debug("The store folder is non empty and does not have manifest file"); - } - } - - this.readers = newArrayListWithCapacity(map.size()); - Integer[] indices = map.keySet().toArray(new Integer[map.size()]); - Arrays.sort(indices); - for (int i = indices.length - 1; i >= 0; i--) { - if (!readOnly) { - readers.add(TarReader.open(map.get(indices[i]), memoryMapping)); - } else { - // only try to read-only recover the latest file as that might - // be the *only* one still being accessed by a writer - boolean recover = i == indices.length - 1; - readers.add(TarReader.openRO(map.get(indices[i]), - memoryMapping, recover)); - } - } - - long initialSize = size(); - this.approximateSize = new AtomicLong(initialSize); - this.stats = new FileStoreStats(builder.statsProvider, this, initialSize); - - if (!readOnly) { - if (indices.length > 0) { - this.writeNumber = indices[indices.length - 1] + 1; - } else { - this.writeNumber = 0; - } - this.writeFile = new File(directory, String.format( - FILE_NAME_FORMAT, writeNumber, "a")); - this.writer = new TarWriter(writeFile, stats); - } - - if (readOnly) { - journalFile = new RandomAccessFile(new File(directory, - JOURNAL_FILE_NAME), "r"); - } else { - journalFile = new RandomAccessFile(new File(directory, - JOURNAL_FILE_NAME), "rw"); - } - - RecordId id = null; - JournalReader journalReader = new JournalReader(new File(directory, JOURNAL_FILE_NAME)); - try { - Iterator heads = journalReader.iterator(); - while (id == null && heads.hasNext()) { - String head = heads.next(); - try { - RecordId last = RecordId.fromString(tracker, head); - SegmentId segmentId = last.getSegmentId(); - if (containsSegment( - segmentId.getMostSignificantBits(), - segmentId.getLeastSignificantBits())) { - id = last; - } else { - log.warn("Unable to access revision {}, rewinding...", last); - } - } catch (IllegalArgumentException e) { - log.warn("Skipping invalid record id {}", head); - } - } - } finally { - journalReader.close(); - } - - journalFile.seek(journalFile.length()); - - if (!readOnly) { - lockFile = new RandomAccessFile( - new File(directory, LOCK_FILE_NAME), "rw"); - lock = 
lockFile.getChannel().lock(); - } else { - lockFile = null; - lock = null; - } - - if (id != null) { - head = new AtomicReference(id); - persistedHead = new AtomicReference(id); - } else { - NodeBuilder nodeBuilder = EMPTY_NODE.builder(); - nodeBuilder.setChildNode("root", builder.root); - head = new AtomicReference(tracker.getWriter().writeNode( - nodeBuilder.getNodeState()).getRecordId()); - persistedHead = new AtomicReference(null); - } - - if (!readOnly) { - flushThread = BackgroundThread.run( - "TarMK flush thread [" + directory + "]", 5000, // 5s interval - new Runnable() { - @Override - public void run() { - try { - flush(); - } catch (IOException e) { - log.warn("Failed to flush the TarMK at" + - directory, e); - } - } - }); - compactionThread = BackgroundThread.run( - "TarMK compaction thread [" + directory + "]", -1, - new Runnable() { - @Override - public void run() { - try { - maybeCompact(true); - } catch (IOException e) { - log.error("Error running compaction", e); - } - } - }); - - diskSpaceThread = BackgroundThread.run( - "TarMK disk space check [" + directory + "]", MINUTES.toMillis(1), new Runnable() { - - @Override - public void run() { - checkDiskSpace(); - } - - }); - } else { - flushThread = null; - compactionThread = null; - diskSpaceThread = null; - } - - sufficientDiskSpace = new AtomicBoolean(true); - - if (readOnly) { - log.info("TarMK ReadOnly opened: {} (mmap={})", directory, - memoryMapping); - } else { - log.info("TarMK opened: {} (mmap={})", directory, memoryMapping); - } - log.debug("TarMK readers {}", this.readers); - } - - @Deprecated - public boolean maybeCompact(boolean cleanup) throws IOException { - gcMonitor.info("TarMK GC #{}: started", gcCount.incrementAndGet()); - - Runtime runtime = Runtime.getRuntime(); - long avail = runtime.totalMemory() - runtime.freeMemory(); - long[] weights = tracker.getCompactionMap().getEstimatedWeights(); - long delta = weights.length > 0 - ? weights[0] - : 0; - long needed = delta * compactionStrategy.getMemoryThreshold(); - if (needed >= avail) { - gcMonitor.skipped( - "TarMK GC #{}: not enough available memory {} ({} bytes), needed {} ({} bytes)," + - " last merge delta {} ({} bytes), so skipping compaction for now", - gcCount, - humanReadableByteCount(avail), avail, - humanReadableByteCount(needed), needed, - humanReadableByteCount(delta), delta); - if (cleanup) { - cleanupNeeded.set(!compactionStrategy.isPaused()); - } - return false; - } - - Stopwatch watch = Stopwatch.createStarted(); - compactionStrategy.setCompactionStart(System.currentTimeMillis()); - boolean compacted = false; - - long offset = compactionStrategy.getPersistCompactionMap() - ? sum(tracker.getCompactionMap().getRecordCounts()) * PersistedCompactionMap.BYTES_PER_ENTRY - : 0; - - byte gainThreshold = compactionStrategy.getGainThreshold(); - boolean runCompaction = true; - if (gainThreshold <= 0) { - gcMonitor.info("TarMK GC #{}: estimation skipped because gain threshold value ({} <= 0)", gcCount, - gainThreshold); - } else if (compactionStrategy.isPaused()) { - gcMonitor.info("TarMK GC #{}: estimation skipped because compaction is paused", gcCount); - } else { - gcMonitor.info("TarMK GC #{}: estimation started", gcCount); - Supplier shutdown = newShutdownSignal(); - CompactionGainEstimate estimate = estimateCompactionGain(shutdown); - if (shutdown.get()) { - gcMonitor.info("TarMK GC #{}: estimation interrupted. 
Skipping compaction.", gcCount); - return false; - } - - long gain = estimate.estimateCompactionGain(offset); - runCompaction = gain >= gainThreshold; - if (runCompaction) { - gcMonitor.info( - "TarMK GC #{}: estimation completed in {} ({} ms). " + - "Gain is {}% or {}/{} ({}/{} bytes), so running compaction", - gcCount, watch, watch.elapsed(MILLISECONDS), gain, - humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize()), - estimate.getReachableSize(), estimate.getTotalSize()); - } else { - if (estimate.getTotalSize() == 0) { - gcMonitor.skipped( - "TarMK GC #{}: estimation completed in {} ({} ms). " + - "Skipping compaction for now as repository consists of a single tar file only", - gcCount, watch, watch.elapsed(MILLISECONDS)); - } else { - gcMonitor.skipped( - "TarMK GC #{}: estimation completed in {} ({} ms). " + - "Gain is {}% or {}/{} ({}/{} bytes), so skipping compaction for now", - gcCount, watch, watch.elapsed(MILLISECONDS), gain, - humanReadableByteCount(estimate.getReachableSize()), humanReadableByteCount(estimate.getTotalSize()), - estimate.getReachableSize(), estimate.getTotalSize()); - } - } - } - - if (runCompaction) { - if (!compactionStrategy.isPaused()) { - compact(); - compacted = true; - } else { - gcMonitor.skipped("TarMK GC #{}: compaction paused", gcCount); - } - } - if (cleanup) { - cleanupNeeded.set(!compactionStrategy.isPaused()); - } - return compacted; - } - - static Map> collectFiles(File directory) { - Map> dataFiles = newHashMap(); - Map bulkFiles = newHashMap(); - - for (File file : directory.listFiles()) { - Matcher matcher = FILE_NAME_PATTERN.matcher(file.getName()); - if (matcher.matches()) { - Integer index = Integer.parseInt(matcher.group(2)); - if ("data".equals(matcher.group(1))) { - Map files = dataFiles.get(index); - if (files == null) { - files = newHashMap(); - dataFiles.put(index, files); - } - Character generation = 'a'; - if (matcher.group(4) != null) { - generation = matcher.group(4).charAt(0); - } - checkState(files.put(generation, file) == null); - } else { - checkState(bulkFiles.put(index, file) == null); - } - } - } - - if (!bulkFiles.isEmpty()) { - log.info("Upgrading TarMK file names in {}", directory); - - if (!dataFiles.isEmpty()) { - // first put all the data segments at the end of the list - Integer[] indices = - dataFiles.keySet().toArray(new Integer[dataFiles.size()]); - Arrays.sort(indices); - int position = Math.max( - indices[indices.length - 1] + 1, - bulkFiles.size()); - for (Integer index : indices) { - Map files = dataFiles.remove(index); - Integer newIndex = position++; - for (Character generation : newHashSet(files.keySet())) { - File file = files.get(generation); - File newFile = new File( - directory, - format(FILE_NAME_FORMAT, newIndex, generation)); - log.info("Renaming {} to {}", file, newFile); - file.renameTo(newFile); - files.put(generation, newFile); - } - dataFiles.put(newIndex, files); - } - } - - // then add all the bulk segments at the beginning of the list - Integer[] indices = - bulkFiles.keySet().toArray(new Integer[bulkFiles.size()]); - Arrays.sort(indices); - int position = 0; - for (Integer index : indices) { - File file = bulkFiles.remove(index); - Integer newIndex = position++; - File newFile = new File( - directory, format(FILE_NAME_FORMAT, newIndex, "a")); - log.info("Renaming {} to {}", file, newFile); - file.renameTo(newFile); - dataFiles.put(newIndex, singletonMap('a', newFile)); - } - } - - return dataFiles; - } - - @Deprecated - public long size() { - 
fileStoreLock.readLock().lock(); - try { - long size = writeFile != null ? writeFile.length() : 0; - for (TarReader reader : readers) { - size += reader.size(); - } - return size; - } finally { - fileStoreLock.readLock().unlock(); - } - } - - @Deprecated - public int readerCount(){ - fileStoreLock.readLock().lock(); - try { - return readers.size(); - } finally { - fileStoreLock.readLock().unlock(); - } - } - - /** - * Returns the number of segments in this TarMK instance. - * - * @return number of segments - */ - private int count() { - fileStoreLock.readLock().lock(); - try { - int count = 0; - if (writer != null) { - count += writer.count(); - } - for (TarReader reader : readers) { - count += reader.count(); - } - return count; - } finally { - fileStoreLock.readLock().unlock(); - } - } - - /** - * Estimated compaction gain. The result will be undefined if stopped through - * the passed {@code stop} signal. - * @param stop signal for stopping the estimation process. - * @return compaction gain estimate - */ - CompactionGainEstimate estimateCompactionGain(Supplier stop) { - CompactionGainEstimate estimate = new CompactionGainEstimate(getHead(), count(), stop); - fileStoreLock.readLock().lock(); - try { - for (TarReader reader : readers) { - reader.accept(estimate); - if (stop.get()) { - break; - } - } - } finally { - fileStoreLock.readLock().unlock(); - } - return estimate; - } - - @Deprecated - public FileStoreStats getStats() { - return stats; - } - - @Deprecated - public void flush() throws IOException { - flush(cleanupNeeded.getAndSet(false)); - } - - @Deprecated - public void flush(boolean cleanup) throws IOException { - synchronized (persistedHead) { - RecordId before = persistedHead.get(); - RecordId after = head.get(); - - if (cleanup || !after.equals(before)) { - // needs to happen outside the synchronization block below to - // avoid a deadlock with another thread flushing the writer - tracker.getWriter().flush(); - - // needs to happen outside the synchronization block below to - // prevent the flush from stopping concurrent reads and writes - writer.flush(); - - fileStoreLock.writeLock().lock(); - try { - log.debug("TarMK journal update {} -> {}", before, after); - journalFile.writeBytes(after.toString10() + " root " + System.currentTimeMillis()+"\n"); - journalFile.getChannel().force(false); - persistedHead.set(after); - } finally { - fileStoreLock.writeLock().unlock(); - } - - // Needs to happen outside the synchronization block above to - // prevent the flush from stopping concurrent reads and writes - // by the persisted compaction map. See OAK-3264 - if (cleanup) { - // Explicitly give up reference to the previous root state - // otherwise they would block cleanup. See OAK-3347 - before = null; - after = null; - pendingRemove.addAll(cleanup()); - } - } - - // remove all obsolete tar generations - Iterator iterator = pendingRemove.iterator(); - while (iterator.hasNext()) { - File file = iterator.next(); - log.debug("TarMK GC: Attempting to remove old file {}", - file); - if (!file.exists() || file.delete()) { - log.debug("TarMK GC: Removed old file {}", file); - iterator.remove(); - } else { - log.warn("TarMK GC: Failed to remove old file {}. Will retry later.", file); - } - } - } - } - - /** - * Runs garbage collection on the segment level, which could write new - * generations of tar files. It checks which segments are still reachable, - * and throws away those that are not. - *
- * A new generation of a tar file is created (and segments are only - * discarded) if doing so releases more than 25% of the space in a tar file. - */ - @Deprecated - public List cleanup() throws IOException { - Stopwatch watch = Stopwatch.createStarted(); - long initialSize = size(); - Set referencedIds = newHashSet(); - Map cleaned = newLinkedHashMap(); - - fileStoreLock.writeLock().lock(); - try { - gcMonitor.info("TarMK GC #{}: cleanup started. Current repository size is {} ({} bytes)", - gcCount, humanReadableByteCount(initialSize), initialSize); - - newWriter(); - tracker.clearCache(); - - // Suggest to the JVM that now would be a good time - // to clear stale weak references in the SegmentTracker - System.gc(); - - for (SegmentId id : tracker.getReferencedSegmentIds()) { - referencedIds.add(id.asUUID()); - } - writer.collectReferences(referencedIds); - for (TarReader reader : readers) { - cleaned.put(reader, reader); - } - } finally { - fileStoreLock.writeLock().unlock(); - } - - // Do actual cleanup outside of the lock to prevent blocking - // concurrent writers for a long time - includeForwardReferences(cleaned.keySet(), referencedIds); - LinkedList toRemove = newLinkedList(); - Set cleanedIds = newHashSet(); - for (TarReader reader : cleaned.keySet()) { - cleaned.put(reader, reader.cleanup(referencedIds, cleanedIds)); - if (shutdown) { - gcMonitor.info("TarMK GC #{}: cleanup interrupted", gcCount); - break; - } - } - - List oldReaders = newArrayList(); - fileStoreLock.writeLock().lock(); - try { - // Replace current list of reader with the cleaned readers taking care not to lose - // any new reader that might have come in through concurrent calls to newWriter() - List newReaders = newArrayList(); - for (TarReader reader : readers) { - if (cleaned.containsKey(reader)) { - TarReader newReader = cleaned.get(reader); - if (newReader != null) { - newReaders.add(newReader); - } - if (newReader != reader) { - oldReaders.add(reader); - } - } else { - newReaders.add(reader); - } - } - readers = newReaders; - } finally { - fileStoreLock.writeLock().unlock(); - } - - // Close old readers *after* setting readers to the new readers to avoid accessing - // a closed reader from readSegment() - for (TarReader oldReader : oldReaders) { - closeAndLogOnFail(oldReader); - File file = oldReader.getFile(); - gcMonitor.info("TarMK GC #{}: cleanup marking file for deletion: {}", gcCount, file.getName()); - toRemove.addLast(file); - } - - CompactionMap cm = tracker.getCompactionMap(); - cm.remove(cleanedIds); - long finalSize = size(); - approximateSize.set(finalSize); - stats.reclaimed(initialSize - finalSize); - gcMonitor.cleaned(initialSize - finalSize, finalSize); - gcMonitor.info("TarMK GC #{}: cleanup completed in {} ({} ms). Post cleanup size is {} ({} bytes)" + - " and space reclaimed {} ({} bytes). Compaction map weight/depth is {}/{} ({} bytes/{}).", - gcCount, watch, watch.elapsed(MILLISECONDS), - humanReadableByteCount(finalSize), finalSize, - humanReadableByteCount(initialSize - finalSize), initialSize - finalSize, - humanReadableByteCount(sum(cm.getEstimatedWeights())), cm.getDepth(), - sum(cm.getEstimatedWeights()), cm.getDepth()); - return toRemove; - } - - /** - * Include the ids of all segments transitively reachable through forward references from - * {@code referencedIds}. See OAK-3864. 
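Aside, not part of the diff: the method body below computes a transitive closure, repeating a sweep over the tar readers until no new segment ids turn up. The same fixed-point shape in a generic, self-contained form (the adjacency-map representation and all names are invented):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class TransitiveClosureDemo {

    // expand the seed with everything reachable over graph edges, sweeping
    // until a pass discovers nothing new (the fixed point)
    static <T> Set<T> close(Set<T> seed, Map<T, Set<T>> graph) {
        Set<T> reachable = new HashSet<>(seed);
        Set<T> frontier = new HashSet<>(seed);
        while (!frontier.isEmpty()) {
            Set<T> next = new HashSet<>();
            for (T node : frontier) {
                for (T successor : graph.getOrDefault(node, Collections.emptySet())) {
                    if (reachable.add(successor)) { // true only for new ids
                        next.add(successor);
                    }
                }
            }
            frontier = next;
        }
        return reachable;
    }

    public static void main(String[] args) {
        Map<String, Set<String>> refs = new HashMap<>();
        refs.put("a", new HashSet<>(Arrays.asList("b")));
        refs.put("b", new HashSet<>(Arrays.asList("c")));
        // prints a, b and c, in some order
        System.out.println(close(new HashSet<>(Arrays.asList("a")), refs));
    }
}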
- */ - private void includeForwardReferences(Iterable readers, Set referencedIds) - throws IOException { - Set fRefs = newHashSet(referencedIds); - do { - // Add direct forward references - for (TarReader reader : readers) { - reader.calculateForwardReferences(fRefs); - if (fRefs.isEmpty()) { - break; // Optimisation: bail out if no references left - } - } - if (!fRefs.isEmpty()) { - gcMonitor.info("TarMK GC #{}: cleanup found {} forward references", gcCount, fRefs.size()); - log.debug("TarMK GC #{}: cleanup found forward references to {}", gcCount, fRefs); - } - // ... as long as new forward references are found. - } while (referencedIds.addAll(fRefs)); - } - - /** - * Returns the cancellation policy for the compaction phase. If the disk - * space was considered insufficient at least once during compaction (or if - * the space was never sufficient to begin with), compaction is considered - * canceled. - * Furthermore when the file store is shutting down, compaction is considered - * canceled. - * - * @return a flag indicating if compaction should be canceled. - */ - private Supplier newCancelCompactionCondition() { - return new Supplier() { - - private boolean outOfDiskSpace; - private boolean shutdown; - - @Override - public Boolean get() { - - // The outOfDiskSpace and shutdown flags can only transition from false (their initial - // values), to true. Once true, there should be no way to go back. - if (!sufficientDiskSpace.get()) { - outOfDiskSpace = true; - } - if (FileStore.this.shutdown) { - this.shutdown = true; - } - - return shutdown || outOfDiskSpace; - } - - @Override - public String toString() { - if (outOfDiskSpace) { - return "Not enough disk space available"; - } else if (shutdown) { - return "FileStore shutdown request received"; - } else { - return ""; - } - } - }; - } - - /** - * Returns a signal indication the file store shutting down. - * @return a shutdown signal - */ - private Supplier newShutdownSignal() { - return new Supplier() { - @Override - public Boolean get() { - return shutdown; - } - }; - } - - /** - * Copy every referenced record in data (non-bulk) segments. Bulk segments - * are fully kept (they are only removed in cleanup, if there is no - * reference to them). 
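Aside, not part of the diff: the compact() body that follows retries against concurrent commits, rebasing them onto the compacted state and attempting to swap the head again, up to the strategy's retry count. A self-contained model of that loop where an AtomicReference stands in for setHead's compare-and-set and rebase() is a hypothetical stand-in for compactor.compact(before, head, onto):

import java.util.concurrent.atomic.AtomicReference;

class CompactRetryDemo {

    // hypothetical stand-in for compactor.compact(before, head, onto):
    // folds the concurrent change (before -> head) onto the compacted state
    static String rebase(String before, String head, String onto) {
        return onto + "+(" + head + "-" + before + ")";
    }

    public static void main(String[] args) {
        AtomicReference<String> headRef = new AtomicReference<>("r0");
        String before = "r0";
        String after = "r0'"; // compacted equivalent of r0
        int retryCount = 5;

        boolean success = false;
        for (int cycle = 1; !success && cycle <= retryCount; cycle++) {
            // the swap succeeds only if no commit moved the head meanwhile,
            // mirroring FileStore.setHead's compare-and-set semantics
            success = headRef.compareAndSet(before, after);
            if (!success) {
                String head = headRef.get();         // a concurrent commit won
                after = rebase(before, head, after); // compact it on top
                before = head;                       // and retry from there
            }
        }
        System.out.println("success=" + success + ", head=" + headRef.get());
    }
}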
- */ - @Deprecated - public void compact() throws IOException { - checkState(!compactionStrategy.equals(NO_COMPACTION), - "You must set a compactionStrategy before calling compact"); - gcMonitor.info("TarMK GC #{}: compaction started, strategy={}", gcCount, compactionStrategy); - Stopwatch watch = Stopwatch.createStarted(); - Supplier compactionCanceled = newCancelCompactionCondition(); - Compactor compactor = new Compactor(tracker, compactionStrategy, compactionCanceled); - SegmentNodeState before = getHead(); - long existing = before.getChildNode(SegmentNodeStore.CHECKPOINTS) - .getChildNodeCount(Long.MAX_VALUE); - if (existing > 1) { - gcMonitor.warn( - "TarMK GC #{}: compaction found {} checkpoints, you might need to run checkpoint cleanup", - gcCount, existing); - } - - SegmentNodeState after = compactor.compact(EMPTY_NODE, before, EMPTY_NODE); - gcMonitor.info("TarMK GC #{}: compacted {} to {}", - gcCount, before.getRecordId(), after.getRecordId()); - - if (compactionCanceled.get()) { - gcMonitor.warn("TarMK GC #{}: compaction canceled: {}", gcCount, compactionCanceled); - return; - } - - Callable setHead = new SetHead(before, after, compactor); - try { - int cycles = 0; - boolean success = false; - while(cycles++ < compactionStrategy.getRetryCount() - && !(success = compactionStrategy.compacted(setHead))) { - // Some other concurrent changes have been made. - // Rebase (and compact) those changes on top of the - // compacted state before retrying to set the head. - gcMonitor.info("TarMK GC #{}: compaction detected concurrent commits while compacting. " + - "Compacting these commits. Cycle {}", gcCount, cycles); - SegmentNodeState head = getHead(); - after = compactor.compact(before, head, after); - gcMonitor.info("TarMK GC #{}: compacted {} against {} to {}", - gcCount, head.getRecordId(), before.getRecordId(), after.getRecordId()); - - if (compactionCanceled.get()) { - gcMonitor.warn("TarMK GC #{}: compaction canceled: {}", gcCount, compactionCanceled); - return; - } - - before = head; - setHead = new SetHead(head, after, compactor); - } - if (!success) { - gcMonitor.info("TarMK GC #{}: compaction gave up compacting concurrent commits after {} cycles.", - gcCount, cycles - 1); - if (compactionStrategy.getForceAfterFail()) { - gcMonitor.info("TarMK GC #{}: compaction force compacting remaining commits", gcCount); - if (!forceCompact(before, after, compactor)) { - gcMonitor.warn("TarMK GC #{}: compaction failed to force compact remaining commits. 
" + - "Most likely compaction didn't get exclusive access to the store.", gcCount); - } - } - } - - gcMonitor.info("TarMK GC #{}: compaction completed in {} ({} ms), after {} cycles", - gcCount, watch, watch.elapsed(MILLISECONDS), cycles - 1); - } catch (Exception e) { - gcMonitor.error("TarMK GC #" + gcCount + ": compaction encountered an error", e); - } - } - - private boolean forceCompact(final NodeState before, final SegmentNodeState onto, final Compactor compactor) throws Exception { - return compactionStrategy.compacted(new Callable() { - @Override - public Boolean call() throws Exception { - return new SetHead(getHead(), compactor.compact(before, getHead(), onto), compactor).call(); - } - }); - } - - @Deprecated - public Iterable getSegmentIds() { - fileStoreLock.readLock().lock(); - try { - List ids = newArrayList(); - if (writer != null) { - for (UUID uuid : writer.getUUIDs()) { - ids.add(tracker.getSegmentId( - uuid.getMostSignificantBits(), - uuid.getLeastSignificantBits())); - } - } - for (TarReader reader : readers) { - for (UUID uuid : reader.getUUIDs()) { - ids.add(tracker.getSegmentId( - uuid.getMostSignificantBits(), - uuid.getLeastSignificantBits())); - } - } - return ids; - } finally { - fileStoreLock.readLock().unlock(); - } - } - - @Override - @Deprecated - public SegmentTracker getTracker() { - return tracker; - } - - @Override - @Deprecated - public SegmentNodeState getHead() { - return new SegmentNodeState(head.get()); - } - - @Override - @Deprecated - public boolean setHead(SegmentNodeState base, SegmentNodeState head) { - RecordId id = this.head.get(); - return id.equals(base.getRecordId()) - && this.head.compareAndSet(id, head.getRecordId()); - } - - @Override - @Deprecated - public void close() { - // Flag the store as shutting / shut down - shutdown = true; - - // avoid deadlocks by closing (and joining) the background - // threads before acquiring the synchronization lock - closeAndLogOnFail(compactionThread); - closeAndLogOnFail(flushThread); - closeAndLogOnFail(diskSpaceThread); - try { - flush(); - tracker.getWriter().dropCache(); - fileStoreLock.writeLock().lock(); - try { - closeAndLogOnFail(writer); - - List list = readers; - readers = newArrayList(); - for (TarReader reader : list) { - closeAndLogOnFail(reader); - } - - if (lock != null) { - lock.release(); - } - closeAndLogOnFail(lockFile); - closeAndLogOnFail(journalFile); - } finally { - fileStoreLock.writeLock().unlock(); - } - } catch (IOException e) { - throw new RuntimeException( - "Failed to close the TarMK at " + directory, e); - } - - System.gc(); // for any memory-mappings that are no longer used - - log.info("TarMK closed: {}", directory); - } - - @Override - @Deprecated - public boolean containsSegment(SegmentId id) { - if (id.getTracker() == tracker) { - return true; - } - - long msb = id.getMostSignificantBits(); - long lsb = id.getLeastSignificantBits(); - return containsSegment(msb, lsb); - } - - private boolean containsSegment(long msb, long lsb) { - for (TarReader reader : readers) { - if (reader.containsEntry(msb, lsb)) { - return true; - } - } - - if (writer != null) { - fileStoreLock.readLock().lock(); - try { - if (writer.containsEntry(msb, lsb)) { - return true; - } - } finally { - fileStoreLock.readLock().unlock(); - } - } - - // the writer might have switched to a new file, - // so we need to re-check the readers - for (TarReader reader : readers) { - if (reader.containsEntry(msb, lsb)) { - return true; - } - } - - return false; - } - - @Override - @Deprecated - public Segment 
readSegment(SegmentId id) { - long msb = id.getMostSignificantBits(); - long lsb = id.getLeastSignificantBits(); - - for (TarReader reader : readers) { - try { - if (reader.isClosed()) { - // Cleanup might already have closed the file. - // The segment should be available from another file. - log.debug("Skipping closed tar file {}", reader); - continue; - } - - ByteBuffer buffer = reader.readEntry(msb, lsb); - if (buffer != null) { - return new Segment(tracker, id, buffer); - } - } catch (IOException e) { - log.warn("Failed to read from tar file " + reader, e); - } - } - - if (writer != null) { - fileStoreLock.readLock().lock(); - try { - try { - ByteBuffer buffer = writer.readEntry(msb, lsb); - if (buffer != null) { - return new Segment(tracker, id, buffer); - } - } catch (IOException e) { - log.warn("Failed to read from tar file " + writer, e); - } - } finally { - fileStoreLock.readLock().unlock(); - } - } - - // the writer might have switched to a new file, - // so we need to re-check the readers - for (TarReader reader : readers) { - try { - if (reader.isClosed()) { - // Cleanup might already have closed the file. - // The segment should be available from another file. - log.info("Skipping closed tar file {}", reader); - continue; - } - - ByteBuffer buffer = reader.readEntry(msb, lsb); - if (buffer != null) { - return new Segment(tracker, id, buffer); - } - } catch (IOException e) { - log.warn("Failed to read from tar file " + reader, e); - } - } - - throw new SegmentNotFoundException(id); - } - - @Override - @Deprecated - public void writeSegment(SegmentId id, byte[] data, int offset, int length) throws IOException { - fileStoreLock.writeLock().lock(); - try { - long size = writer.writeEntry( - id.getMostSignificantBits(), - id.getLeastSignificantBits(), - data, offset, length); - if (size >= maxFileSize) { - newWriter(); - } - approximateSize.addAndGet(TarWriter.BLOCK_SIZE + length + TarWriter.getPaddingSize(length)); - } finally { - fileStoreLock.writeLock().unlock(); - } - } - - private void newWriter() throws IOException { - if (writer.isDirty()) { - writer.close(); - - List list = - newArrayListWithCapacity(1 + readers.size()); - list.add(TarReader.open(writeFile, memoryMapping)); - list.addAll(readers); - readers = list; - - writeNumber++; - writeFile = new File( - directory, - String.format(FILE_NAME_FORMAT, writeNumber, "a")); - writer = new TarWriter(writeFile, stats); - } - } - - @Override - @Deprecated - public Blob readBlob(String blobId) { - if (blobStore != null) { - return new BlobStoreBlob(blobStore, blobId); - } - throw new IllegalStateException("Attempt to read external blob with blobId [" + blobId + "] " + - "without specifying BlobStore"); - } - - @Override - @Deprecated - public BlobStore getBlobStore() { - return blobStore; - } - - @Override - @Deprecated - public void gc() { - if (compactionStrategy == NO_COMPACTION) { - log.warn("Call to gc while compaction strategy set to {}. 
", NO_COMPACTION); - } - compactionThread.trigger(); - } - - @Deprecated - public Map> getTarReaderIndex() { - Map> index = new HashMap>(); - for (TarReader reader : readers) { - index.put(reader.getFile().getAbsolutePath(), reader.getUUIDs()); - } - return index; - } - - @Deprecated - public Map> getTarGraph(String fileName) throws IOException { - for (TarReader reader : readers) { - if (fileName.equals(reader.getFile().getName())) { - Map> graph = newHashMap(); - for (UUID uuid : reader.getUUIDs()) { - graph.put(uuid, null); - } - Map> g = reader.getGraph(); - if (g != null) { - graph.putAll(g); - } - return graph; - } - } - return emptyMap(); - } - - @Deprecated - public FileStore setCompactionStrategy(CompactionStrategy strategy) { - this.compactionStrategy = strategy; - log.info("Compaction strategy set to: {}", strategy); - return this; - } - - private void setRevision(String rootRevision) { - fileStoreLock.writeLock().lock(); - try { - RecordId id = RecordId.fromString(tracker, rootRevision); - head.set(id); - persistedHead.set(id); - } finally { - fileStoreLock.writeLock().unlock(); - } - } - - private void checkDiskSpace() { - long repositoryDiskSpace = approximateSize.get(); - long availableDiskSpace = directory.getFreeSpace(); - boolean updated = compactionStrategy.isDiskSpaceSufficient(repositoryDiskSpace, availableDiskSpace); - boolean previous = sufficientDiskSpace.getAndSet(updated); - - if (previous && !updated) { - log.warn("Available disk space ({}) is too low, current repository size is approx. {}", - humanReadableByteCount(availableDiskSpace), - humanReadableByteCount(repositoryDiskSpace)); - } - - if (updated && !previous) { - log.info("Available disk space ({}) is sufficient again for repository operations, current repository size is approx. {}", - humanReadableByteCount(availableDiskSpace), - humanReadableByteCount(repositoryDiskSpace)); - } - } - - /** - * A read only {@link FileStore} implementation that supports - * going back to old revisions. - *
- * <p>
- * All write methods are no-ops. - */ - @Deprecated - public static class ReadOnlyStore extends FileStore { - - private ReadOnlyStore(Builder builder) throws IOException, InvalidFileStoreVersionException { - super(builder, true); - } - - /** - * Go to the specified {@code revision} - * - * @param revision - */ - @Deprecated - public void setRevision(String revision) { - super.setRevision(revision); - } - - /** - * Build the graph of segments reachable from an initial set of segments - * @param roots the initial set of segments - * @param visitor visitor receiving call back while following the segment graph - * @throws IOException - */ - @Deprecated - public void traverseSegmentGraph( - @Nonnull Set roots, - @Nonnull SegmentGraphVisitor visitor) throws IOException { - - List readers = super.readers; - super.includeForwardReferences(readers, roots); - for (TarReader reader : readers) { - reader.traverseSegmentGraph(checkNotNull(roots), checkNotNull(visitor)); - } - } - - @Override - @Deprecated - public boolean setHead(SegmentNodeState base, SegmentNodeState head) { - throw new UnsupportedOperationException("Read Only Store"); - } - - @Override - @Deprecated - public void writeSegment(SegmentId id, byte[] data, - int offset, int length) { - throw new UnsupportedOperationException("Read Only Store"); - } - - /** - * no-op - */ - @Override - @Deprecated - public void flush() { /* nop */ } - - @Override - @Deprecated - public LinkedList cleanup() { - throw new UnsupportedOperationException("Read Only Store"); - } - - @Override - @Deprecated - public void gc() { - throw new UnsupportedOperationException("Read Only Store"); - } - - @Override - @Deprecated - public void compact() { - throw new UnsupportedOperationException("Read Only Store"); - } - - @Override - @Deprecated - public boolean maybeCompact(boolean cleanup) { - throw new UnsupportedOperationException("Read Only Store"); - } - } - - private class SetHead implements Callable { - private final SegmentNodeState before; - private final SegmentNodeState after; - private final Compactor compactor; - - @Deprecated - public SetHead(SegmentNodeState before, SegmentNodeState after, Compactor compactor) { - this.before = before; - this.after = after; - this.compactor = compactor; - } - - @Override - @Deprecated - public Boolean call() throws Exception { - // When used in conjunction with the SegmentNodeStore, this method - // needs to be called inside the commitSemaphore as doing otherwise - // might result in mixed segments. See OAK-2192. - if (setHead(before, after)) { - tracker.setCompactionMap(compactor.getCompactionMap()); - - // Drop the SegmentWriter caches and flush any existing state - // in an attempt to prevent new references to old pre-compacted - // content. TODO: There should be a cleaner way to do this. (implement GCMonitor!?) - tracker.getWriter().dropCache(); - tracker.getWriter().flush(); - - gcMonitor.compacted(); - tracker.clearSegmentIdTables(compactionStrategy); - return true; - } else { - return false; - } - } - } - - @Deprecated - public SegmentVersion getVersion() { - return version; - } - - private static void closeAndLogOnFail(Closeable closeable) { - if (closeable != null) { - try { - closeable.close(); - } catch (IOException ioe) { - // ignore and log - log.error(ioe.getMessage(), ioe); - } - } - } - - private static class LoggingGCMonitor implements GCMonitor { - public GCMonitor delegatee = GCMonitor.EMPTY; - - @Override - public void info(String message, Object... 
arguments) { - log.info(message, arguments); - delegatee.info(message, arguments); - } - - @Override - public void warn(String message, Object... arguments) { - log.warn(message, arguments); - delegatee.warn(message, arguments); - } - - @Override - public void error(String message, Exception exception) { - delegatee.error(message, exception); - } - - @Override - public void skipped(String reason, Object... arguments) { - log.info(reason, arguments); - delegatee.skipped(reason, arguments); - } - - @Override - public void compacted() { - delegatee.compacted(); - } - - @Override - public void cleaned(long reclaimedSize, long currentSize) { - delegatee.cleaned(reclaimedSize, currentSize); - } - - @Override - public void updateStatus(String status) { - delegatee.updateStatus(status); - } - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreGCMonitor.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreGCMonitor.java deleted file mode 100644 index 3da6843..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreGCMonitor.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Preconditions.checkNotNull; -import static java.text.DateFormat.getDateTimeInstance; -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; -import static org.apache.jackrabbit.stats.TimeSeriesStatsUtil.asCompositeData; -import static org.slf4j.helpers.MessageFormatter.arrayFormat; - -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.Date; - -import javax.annotation.Nonnull; -import javax.management.openmbean.CompositeData; - -import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.stats.Clock; -import org.apache.jackrabbit.stats.TimeSeriesRecorder; - -/** - * {@link GCMonitor} implementation providing the file store gc status - * as {@link GCMonitorMBean}. - *
- * <p>
- * Users of this class need to schedule a call to {@link #run()} once per - * second to ensure the various time series maintained by this implementation - * are correctly aggregated. - */ -@Deprecated -public class FileStoreGCMonitor extends AnnotatedStandardMBean - implements GCMonitor, GCMonitorMBean, Runnable { - private final TimeSeriesRecorder gcCount = new TimeSeriesRecorder(true); - private final TimeSeriesRecorder repositorySize = new TimeSeriesRecorder(false); - private final TimeSeriesRecorder reclaimedSize = new TimeSeriesRecorder(true); - - private final Clock clock; - - private long lastCompaction; - private long[] segmentCounts = new long[0]; - private long[] recordCounts = new long[0]; - private long[] compactionMapWeights = new long[0]; - private long lastCleanup; - private String lastError; - private String status = "NA"; - - @Deprecated - public FileStoreGCMonitor(@Nonnull Clock clock) { - super(GCMonitorMBean.class); - this.clock = checkNotNull(clock); - } - - //------------------------------------------------------------< Runnable >--- - - @Override - @Deprecated - public void run() { - gcCount.recordOneSecond(); - repositorySize.recordOneSecond(); - reclaimedSize.recordOneSecond(); - } - - //------------------------------------------------------------< GCMonitor >--- - - @Override - @Deprecated - public void info(String message, Object... arguments) { - status = arrayFormat(message, arguments).getMessage(); - } - - @Override - @Deprecated - public void warn(String message, Object... arguments) { - status = arrayFormat(message, arguments).getMessage(); - } - - @Override - @Deprecated - public void error(String message, Exception exception) { - StringWriter sw = new StringWriter(); - sw.write(message + ": "); - exception.printStackTrace(new PrintWriter(sw)); - lastError = sw.toString(); - } - - @Override - @Deprecated - public void skipped(String reason, Object... 
arguments) { - status = arrayFormat(reason, arguments).getMessage(); - } - - @Override - @Deprecated - public void compacted() { - lastCompaction = clock.getTime(); - } - - @Override - @Deprecated - public void cleaned(long reclaimed, long current) { - lastCleanup = clock.getTime(); - gcCount.getCounter().addAndGet(1); - repositorySize.getCounter().set(current); - reclaimedSize.getCounter().addAndGet(reclaimed); - } - - @Override - @Deprecated - public void updateStatus(String status) { - } - - //------------------------------------------------------------< GCMonitorMBean >--- - - @Override - @Deprecated - public String getLastCompaction() { - return toString(lastCompaction); - } - - @Override - @Deprecated - public String getLastCleanup() { - return toString(lastCleanup); - } - - private static String toString(long timestamp) { - if (timestamp != 0) { - return getDateTimeInstance().format(new Date(timestamp)); - } else { - return null; - } - } - - @Override - @Deprecated - public String getLastError() { - return lastError; - } - - @Nonnull - @Override - @Deprecated - public String getStatus() { - return status; - } - - @Override - @Deprecated - public String getCompactionMapStats() { - StringBuilder sb = new StringBuilder(); - String sep = ""; - for (int k = 0; k < segmentCounts.length; k++) { - sb.append(sep).append('[') - .append("Estimated Weight: ") - .append(humanReadableByteCount(compactionMapWeights[k])).append(", ") - .append("Segments: ") - .append(segmentCounts[k]).append(", ") - .append("Records: ") - .append(recordCounts[k]).append(']'); - sep = ", "; - } - return sb.toString(); - } - - @Nonnull - @Override - @Deprecated - public CompositeData getRepositorySize() { - return asCompositeData(repositorySize, "RepositorySize"); - } - - @Nonnull - @Override - @Deprecated - public CompositeData getReclaimedSize() { - return asCompositeData(reclaimedSize, "ReclaimedSize"); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreMonitor.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreMonitor.java deleted file mode 100644 index 79815a4..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreMonitor.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -/** - * FileStoreMonitor are notified for any writes or deletes - * performed by FileStore - */ -interface FileStoreMonitor { - FileStoreMonitor DEFAULT = new FileStoreMonitor() { - @Override - public void written(long bytes) { - - } - - @Override - public void reclaimed(long bytes) { - - } - }; - - /** - * Notifies the monitor when data is written - * - * @param bytes number of bytes written - */ - void written(long bytes); - - /** - * Notifies the monitor when memory is reclaimed - * - * @param bytes number of bytes reclaimed - */ - void reclaimed(long bytes); -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStats.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStats.java deleted file mode 100644 index 4c67861..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStats.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import javax.annotation.Nonnull; -import javax.management.openmbean.CompositeData; - -import org.apache.jackrabbit.api.stats.TimeSeries; -import org.apache.jackrabbit.oak.commons.IOUtils; -import org.apache.jackrabbit.oak.stats.CounterStats; -import org.apache.jackrabbit.oak.stats.MeterStats; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.apache.jackrabbit.oak.stats.StatsOptions; - -import static org.apache.jackrabbit.stats.TimeSeriesStatsUtil.asCompositeData; - -@Deprecated -public class FileStoreStats implements FileStoreStatsMBean, FileStoreMonitor { - @Deprecated - public static final String SEGMENT_REPO_SIZE = "SEGMENT_REPO_SIZE"; - @Deprecated - public static final String SEGMENT_WRITES = "SEGMENT_WRITES"; - private final StatisticsProvider statisticsProvider; - private final FileStore store; - private final MeterStats writeStats; - private final CounterStats repoSize; - - @Deprecated - public FileStoreStats(StatisticsProvider statisticsProvider, FileStore store, long initialSize) { - this.statisticsProvider = statisticsProvider; - this.store = store; - this.writeStats = statisticsProvider.getMeter(SEGMENT_WRITES, StatsOptions.DEFAULT); - this.repoSize = statisticsProvider.getCounterStats(SEGMENT_REPO_SIZE, StatsOptions.DEFAULT); - repoSize.inc(initialSize); - } - - //~-----------------------------< FileStoreMonitor > - - @Override - @Deprecated - public void written(long delta) { - writeStats.mark(delta); - repoSize.inc(delta); - } - - @Override - @Deprecated - public void reclaimed(long size) { - repoSize.dec(size); - } - - //~--------------------------------< FileStoreStatsMBean > - - @Override - @Deprecated - public long getApproximateSize() { - return repoSize.getCount(); - } - - @Override - @Deprecated - public int getTarFileCount() { - return store.readerCount() + 1; //1 for the writer - } - - @Nonnull - @Override - @Deprecated - public CompositeData getWriteStats() { - return asCompositeData(getTimeSeries(SEGMENT_WRITES), SEGMENT_WRITES); - } - - @Nonnull - @Override - @Deprecated - public CompositeData getRepositorySize() { - return asCompositeData(getTimeSeries(SEGMENT_REPO_SIZE), SEGMENT_REPO_SIZE); - } - - @Override - @Deprecated - public String fileStoreInfoAsString() { - return String.format("Segment store size : %s%n" + - "Number of tar files : %d", - IOUtils.humanReadableByteCount(getApproximateSize()), - getTarFileCount()); - } - - private TimeSeries getTimeSeries(String name) { - return statisticsProvider.getStats().getTimeSeries(name, true); - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsMBean.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsMBean.java deleted file mode 100644 index 73bdd84..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsMBean.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import javax.management.openmbean.CompositeData; - -@Deprecated -public interface FileStoreStatsMBean { - - @Deprecated - String TYPE = "FileStoreStats"; - - @Deprecated - long getApproximateSize(); - - @Deprecated - int getTarFileCount(); - - /** - * @return time series of the writes to repository - */ - @Deprecated - CompositeData getWriteStats(); - - /** - * @return time series of the writes to repository - */ - @Deprecated - CompositeData getRepositorySize(); - - @Deprecated - String fileStoreInfoAsString(); -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/GCMonitorMBean.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/GCMonitorMBean.java deleted file mode 100644 index c002bfe..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/GCMonitorMBean.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import javax.management.openmbean.CompositeData; - -/** - * MBean for monitoring the revision garbage collection process of the - * {@link FileStore}. - */ -@Deprecated -public interface GCMonitorMBean { - @Deprecated - String TYPE = "GC Monitor"; - - /** - * @return timestamp of the last compaction or {@code null} if none. - */ - @CheckForNull - @Deprecated - String getLastCompaction(); - - /** - * @return timestamp of the last cleanup or {@code null} if none. - */ - @CheckForNull - @Deprecated - String getLastCleanup(); - - /** - * @return last error or {@code null} if none. - */ - @CheckForNull - @Deprecated - String getLastError(); - - /** - * @return current status. - */ - @Nonnull - @Deprecated - String getStatus(); - - /** - * Statistics about the compaction map. 
- */ - @Nonnull - @Deprecated - String getCompactionMapStats(); - - /** - * @return time series of the repository size - */ - @Nonnull - @Deprecated - CompositeData getRepositorySize(); - - /** - * @return time series of the reclaimed space - */ - @Nonnull - @Deprecated - CompositeData getReclaimedSize(); -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/InvalidFileStoreVersionException.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/InvalidFileStoreVersionException.java deleted file mode 100644 index 63ec67d..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/InvalidFileStoreVersionException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -/** - * This exception can be thrown during the startup of the {@link FileStore} to - * indicate an incompatible version mismatch between the code that generated the - * data in the store and the current version of the {@link FileStore}. - */ -@Deprecated -public class InvalidFileStoreVersionException extends Exception { - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReader.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReader.java deleted file mode 100644 index 60867ea..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReader.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.util.Iterator; - -import com.google.common.collect.AbstractIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Reader for journal files of the SegmentMK. 
- */ -@Deprecated -public final class JournalReader implements Closeable, Iterable { - private static final Logger LOG = LoggerFactory.getLogger(JournalReader.class); - - private final ReversedLinesFileReader journal; - - @Deprecated - public JournalReader(File journalFile) throws IOException { - journal = new ReversedLinesFileReader(journalFile); - } - - /** - * @return Iterator over the revisions in the journal in reverse order - * (end of the file to beginning). - */ - @Override - @Deprecated - public Iterator iterator() { - return new AbstractIterator() { - @Override - protected String computeNext() { - try { - String line = journal.readLine(); - while (line != null) { - int space = line.indexOf(' '); - if (space != -1) { - return line.substring(0, space); - } - line = journal.readLine(); - } - } catch (IOException e) { - LOG.error("Error reading journal file", e); - } - return endOfData(); - } - }; - } - - @Override - @Deprecated - public void close() throws IOException { - journal.close(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReader.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReader.java deleted file mode 100644 index 7574cc6..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReader.java +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.io.UnsupportedEncodingException; -import java.nio.charset.Charset; -import java.nio.charset.CharsetEncoder; -import java.nio.charset.UnsupportedCharsetException; - -import org.apache.commons.io.Charsets; - -/** - * Reads lines in a file reversely (similar to a BufferedReader, but starting at - * the last line). Useful for e.g. searching in log files. - * - * FIXME: this is a copy of org.apache.commons.io.input.ReversedLinesFileReader - * with a fix for IO-471. Replace again once commons-io has released a fixed version. - */ -class ReversedLinesFileReader implements Closeable { - - private final int blockSize; - private final Charset encoding; - - private final RandomAccessFile randomAccessFile; - - private final long totalByteLength; - private final long totalBlockCount; - - private final byte[][] newLineSequences; - private final int avoidNewlineSplitBufferSize; - private final int byteDecrement; - - private FilePart currentFilePart; - - private boolean trailingNewlineOfFileSkipped = false; - - /** - * Creates a ReversedLinesFileReader with default block size of 4KB and the - * platform's default encoding. 
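Aside: the JournalReader deleted above walks journal.log from the end and yields, per line, the record id that precedes the first space, skipping lines without one. A minimal standalone sketch of that parsing contract (JournalHeadSketch and latestRevision are hypothetical names; this version reads the whole file into memory, whereas the deleted class streams the file in reverse precisely to avoid that on large journals):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.List;

    final class JournalHeadSketch {
        /** Returns the most recent record id in the journal, or null if none. */
        static String latestRevision(String journalPath) throws IOException {
            List<String> lines = Files.readAllLines(Paths.get(journalPath));
            for (int i = lines.size() - 1; i >= 0; i--) { // newest entries are appended last
                int space = lines.get(i).indexOf(' ');
                if (space != -1) {
                    return lines.get(i).substring(0, space); // id is the token before the first space
                }
            }
            return null;
        }
    }
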
- * - * @param file - * the file to be read - * @throws IOException if an I/O error occurs - */ - public ReversedLinesFileReader(final File file) throws IOException { - this(file, 4096, Charset.defaultCharset().toString()); - } - - /** - * Creates a ReversedLinesFileReader with the given block size and encoding. - * - * @param file - * the file to be read - * @param blockSize - * size of the internal buffer (for ideal performance this should - * match with the block size of the underlying file system). - * @param encoding - * the encoding of the file - * @throws IOException if an I/O error occurs - * @since 2.3 - */ - public ReversedLinesFileReader(final File file, final int blockSize, final Charset encoding) throws IOException { - this.blockSize = blockSize; - this.encoding = encoding; - - randomAccessFile = new RandomAccessFile(file, "r"); - totalByteLength = randomAccessFile.length(); - int lastBlockLength = (int) (totalByteLength % blockSize); - if (lastBlockLength > 0) { - totalBlockCount = totalByteLength / blockSize + 1; - } else { - totalBlockCount = totalByteLength / blockSize; - if (totalByteLength > 0) { - lastBlockLength = blockSize; - } - } - currentFilePart = new FilePart(totalBlockCount, lastBlockLength, null); - - // --- check & prepare encoding --- - Charset charset = Charsets.toCharset(encoding); - CharsetEncoder charsetEncoder = charset.newEncoder(); - float maxBytesPerChar = charsetEncoder.maxBytesPerChar(); - if(maxBytesPerChar==1f) { - // all one byte encodings are no problem - byteDecrement = 1; - } else if(charset == Charset.forName("UTF-8")) { - // UTF-8 works fine out of the box, for multibyte sequences a second UTF-8 byte can never be a newline byte - // http://en.wikipedia.org/wiki/UTF-8 - byteDecrement = 1; - } else if(charset == Charset.forName("Shift_JIS") || // Same as for UTF-8 http://www.herongyang.com/Unicode/JIS-Shift-JIS-Encoding.html - charset == Charset.forName("windows-31j") || // Windows code page 932 (Japanese) - charset == Charset.forName("x-windows-949") || // Windows code page 949 (Korean) - charset == Charset.forName("gbk") || // Windows code page 936 (Simplified Chinese) - charset == Charset.forName("x-windows-950")) { // Windows code page 950 (Traditional Chinese) - byteDecrement = 1; - } else if(charset == Charset.forName("UTF-16BE") || charset == Charset.forName("UTF-16LE")) { - // UTF-16 new line sequences are not allowed as second tuple of four byte sequences, - // however byte order has to be specified - byteDecrement = 2; - } else if(charset == Charset.forName("UTF-16")) { - throw new UnsupportedEncodingException( - "For UTF-16, you need to specify the byte order (use UTF-16BE or UTF-16LE)"); - } else { - throw new UnsupportedEncodingException( - "Encoding "+encoding+" is not supported yet (feel free to submit a patch)"); - } - // NOTE: The new line sequences are matched in the order given, so it is important that \r\n is BEFORE \n - newLineSequences = new byte[][] { "\r\n".getBytes(encoding), "\n".getBytes(encoding), "\r".getBytes(encoding) }; - - avoidNewlineSplitBufferSize = newLineSequences[0].length; - } - - /** - * Creates a ReversedLinesFileReader with the given block size and encoding. - * - * @param file - * the file to be read - * @param blockSize - * size of the internal buffer (for ideal performance this should - * match with the block size of the underlying file system). 
- * @param encoding - * the encoding of the file - * @throws IOException if an I/O error occurs - * @throws UnsupportedCharsetException - * thrown instead of {@link UnsupportedEncodingException} in version 2.2 if the encoding is not - * supported. - */ - public ReversedLinesFileReader(final File file, final int blockSize, final String encoding) throws IOException { - this(file, blockSize, Charsets.toCharset(encoding)); - } - - /** - * Returns the lines of the file from bottom to top. - * - * @return the next line or null if the start of the file is reached - * @throws IOException if an I/O error occurs - */ - public String readLine() throws IOException { - - String line = currentFilePart.readLine(); - while (line == null) { - currentFilePart = currentFilePart.rollOver(); - if (currentFilePart != null) { - line = currentFilePart.readLine(); - } else { - // no more fileparts: we're done, leave line set to null - break; - } - } - - // aligned behaviour wiht BufferedReader that doesn't return a last, emtpy line - if("".equals(line) && !trailingNewlineOfFileSkipped) { - trailingNewlineOfFileSkipped = true; - line = readLine(); - } - - return line; - } - - /** - * Closes underlying resources. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - randomAccessFile.close(); - } - - private class FilePart { - private final long no; - - private final byte[] data; - - private byte[] leftOver; - - private int currentLastBytePos; - - /** - * ctor - * @param no the part number - * @param length its length - * @param leftOverOfLastFilePart remainder - * @throws IOException if there is a problem reading the file - */ - private FilePart(final long no, final int length, final byte[] leftOverOfLastFilePart) throws IOException { - this.no = no; - int dataLength = length + (leftOverOfLastFilePart != null ? leftOverOfLastFilePart.length : 0); - this.data = new byte[dataLength]; - final long off = (no - 1) * blockSize; - - // read data - if (no > 0 /* file not empty */) { - randomAccessFile.seek(off); - final int countRead = randomAccessFile.read(data, 0, length); - if (countRead != length) { - throw new IllegalStateException("Count of requested bytes and actually read bytes don't match"); - } - } - // copy left over part into data arr - if (leftOverOfLastFilePart != null) { - System.arraycopy(leftOverOfLastFilePart, 0, data, length, leftOverOfLastFilePart.length); - } - this.currentLastBytePos = data.length - 1; - this.leftOver = null; - } - - /** - * Handles block rollover - * - * @return the new FilePart or null - * @throws IOException if there was a problem reading the file - */ - private FilePart rollOver() throws IOException { - - if (currentLastBytePos > -1) { - throw new IllegalStateException("Current currentLastCharPos unexpectedly positive... " - + "last readLine() should have returned something! currentLastCharPos=" + currentLastBytePos); - } - - if (no > 1) { - return new FilePart(no - 1, blockSize, leftOver); - } else { - // NO 1 was the last FilePart, we're finished - if (leftOver != null) { - throw new IllegalStateException("Unexpected leftover of the last block: leftOverOfThisFilePart=" - + new String(leftOver, encoding)); - } - return null; - } - } - - /** - * Reads a line. 
- * - * @return the line or null - * @throws IOException if there is an error reading from the file - */ - private String readLine() throws IOException { - - String line = null; - int newLineMatchByteCount; - - boolean isLastFilePart = no == 1; - - int i = currentLastBytePos; - while (i > -1) { - - if (!isLastFilePart && i < avoidNewlineSplitBufferSize) { - // avoidNewlineSplitBuffer: for all except the last file part we - // take a few bytes to the next file part to avoid splitting of newlines - createLeftOver(); - break; // skip last few bytes and leave it to the next file part - } - - // --- check for newline --- - if ((newLineMatchByteCount = getNewLineMatchByteCount(data, i)) > 0 /* found newline */) { - final int lineStart = i + 1; - int lineLengthBytes = currentLastBytePos - lineStart + 1; - - if (lineLengthBytes < 0) { - throw new IllegalStateException("Unexpected negative line length="+lineLengthBytes); - } - byte[] lineData = new byte[lineLengthBytes]; - System.arraycopy(data, lineStart, lineData, 0, lineLengthBytes); - - line = new String(lineData, encoding); - - currentLastBytePos = i - newLineMatchByteCount; - break; // found line - } - - // --- move cursor --- - i -= byteDecrement; - - // --- end of file part handling --- - if (i < 0) { - createLeftOver(); - break; // end of file part - } - } - - // --- last file part handling --- - if (isLastFilePart && leftOver != null) { - // there will be no line break anymore, this is the first line of the file - line = new String(leftOver, encoding); - leftOver = null; - } - - return line; - } - - /** - * Creates the buffer containing any left over bytes. - */ - private void createLeftOver() { - int lineLengthBytes = currentLastBytePos + 1; - if (lineLengthBytes > 0) { - // create left over for next block - leftOver = new byte[lineLengthBytes]; - System.arraycopy(data, 0, leftOver, 0, lineLengthBytes); - } else { - leftOver = null; - } - currentLastBytePos = -1; - } - - /** - * Finds the new-line sequence and return its length. - * - * @param data buffer to scan - * @param i start offset in buffer - * @return length of newline sequence or 0 if none found - */ - private int getNewLineMatchByteCount(byte[] data, int i) { - for (byte[] newLineSequence : newLineSequences) { - boolean match = true; - for (int j = newLineSequence.length - 1; j >= 0; j--) { - int k = i + j - (newLineSequence.length - 1); - match &= k >= 0 && data[k] == newLineSequence[j]; - } - if (match) { - return newLineSequence.length; - } - } - return 0; - } - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntry.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntry.java deleted file mode 100644 index 4d7e7dd..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntry.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.util.Comparator; - -/** - * A file entry location in a tar file. This is used for the index with a tar - * file. - */ -class TarEntry { - - static final Comparator OFFSET_ORDER = new Comparator() { - @Override - public int compare(TarEntry a, TarEntry b) { - if (a.offset > b.offset) { - return 1; - } else if (a.offset < b.offset) { - return -1; - } else { - return 0; - } - } - }; - - static final Comparator IDENTIFIER_ORDER = new Comparator() { - @Override - public int compare(TarEntry a, TarEntry b) { - if (a.msb > b.msb) { - return 1; - } else if (a.msb < b.msb) { - return -1; - } else if (a.lsb > b.lsb) { - return 1; - } else if (a.lsb < b.lsb) { - return -1; - } else { - return 0; - } - } - }; - - private final long msb; - - private final long lsb; - - private final int offset; - - private final int size; - - TarEntry(long msb, long lsb, int offset, int size) { - this.msb = msb; - this.lsb = lsb; - this.offset = offset; - this.size = size; - } - - long msb() { - return msb; - } - - long lsb() { - return lsb; - } - - int offset() { - return offset; - } - - int size() { - return size; - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntryVisitor.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntryVisitor.java deleted file mode 100644 index b146e6d..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarEntryVisitor.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.File; - -interface TarEntryVisitor { - - void visit(long msb, long lsb, File file, int offset, int size); - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarReader.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarReader.java deleted file mode 100644 index 1e16a5a..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarReader.java +++ /dev/null @@ -1,989 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Charsets.UTF_8; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newLinkedHashMap; -import static com.google.common.collect.Maps.newTreeMap; -import static com.google.common.collect.Sets.newHashSet; -import static com.google.common.collect.Sets.newHashSetWithExpectedSize; -import static java.util.Collections.singletonList; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.REF_COUNT_OFFSET; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentId.isDataSegmentId; -import static org.apache.jackrabbit.oak.plugins.segment.file.TarWriter.GRAPH_MAGIC; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedMap; -import java.util.UUID; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.zip.CRC32; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.SegmentGraphVisitor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -class TarReader implements Closeable { - - /** Logger instance */ - private static final Logger log = LoggerFactory.getLogger(TarReader.class); - - private static final Logger GC_LOG = LoggerFactory.getLogger(TarReader.class.getName() + "-GC"); - - /** Magic byte sequence at the end of the index block. */ - private static final int INDEX_MAGIC = TarWriter.INDEX_MAGIC; - - /** - * Pattern of the segment entry names. Note the trailing (\\..*)? group - * that's included for compatibility with possible future extensions. - */ - private static final Pattern NAME_PATTERN = Pattern.compile( - "([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})" - + "(\\.([0-9a-f]{8}))?(\\..*)?"); - - /** The tar file block size. 
*/ - private static final int BLOCK_SIZE = TarWriter.BLOCK_SIZE; - - static int getEntrySize(int size) { - return BLOCK_SIZE + size + TarWriter.getPaddingSize(size); - } - - static TarReader open(File file, boolean memoryMapping) throws IOException { - TarReader reader = openFirstFileWithValidIndex( - singletonList(file), memoryMapping); - if (reader != null) { - return reader; - } else { - throw new IOException("Failed to open tar file " + file); - } - } - - /** - * Creates a TarReader instance for reading content from a tar file. - * If there exist multiple generations of the same tar file, they are - * all passed to this method. The latest generation with a valid tar - * index (which is a good indication of general validity of the file) - * is opened and the other generations are removed to clean things up. - * If none of the generations has a valid index, then something must have - * gone wrong and we'll try recover as much content as we can from the - * existing tar generations. - * - * @param files - * @param memoryMapping - * @return - * @throws IOException - */ - static TarReader open(Map files, boolean memoryMapping) - throws IOException { - SortedMap sorted = newTreeMap(); - sorted.putAll(files); - - List list = newArrayList(sorted.values()); - Collections.reverse(list); - - TarReader reader = openFirstFileWithValidIndex(list, memoryMapping); - if (reader != null) { - return reader; - } - - // no generation has a valid index, so recover as much as we can - log.warn("Could not find a valid tar index in {}, recovering...", list); - LinkedHashMap entries = newLinkedHashMap(); - for (File file : sorted.values()) { - collectFileEntries(file, entries, true); - } - - // regenerate the first generation based on the recovered data - File file = sorted.values().iterator().next(); - generateTarFile(entries, file); - - reader = openFirstFileWithValidIndex(singletonList(file), memoryMapping); - if (reader != null) { - return reader; - } else { - throw new IOException("Failed to open recovered tar file " + file); - } - } - - static TarReader openRO(Map files, boolean memoryMapping, - boolean recover) throws IOException { - // for readonly store only try the latest generation of a given - // tar file to prevent any rollback or rewrite - File file = files.get(Collections.max(files.keySet())); - - TarReader reader = openFirstFileWithValidIndex(singletonList(file), - memoryMapping); - if (reader != null) { - return reader; - } - if (recover) { - log.warn( - "Could not find a valid tar index in {}, recovering read-only", - file); - // collecting the entries (without touching the original file) and - // writing them into an artificial tar file '.ro.bak' - LinkedHashMap entries = newLinkedHashMap(); - collectFileEntries(file, entries, false); - file = findAvailGen(file, ".ro.bak"); - generateTarFile(entries, file); - reader = openFirstFileWithValidIndex(singletonList(file), - memoryMapping); - if (reader != null) { - return reader; - } - } - - throw new IOException("Failed to open tar file " + file); - } - - /** - * Collects all entries from the given file and optionally backs-up the - * file, by renaming it to a ".bak" extension - * - * @param file - * @param entries - * @param backup - * @throws IOException - */ - private static void collectFileEntries(File file, - LinkedHashMap entries, boolean backup) - throws IOException { - log.info("Recovering segments from tar file {}", file); - try { - RandomAccessFile access = new RandomAccessFile(file, "r"); - try { - recoverEntries(file, access, entries); 
- } finally { - access.close(); - } - } catch (IOException e) { - log.warn("Could not read tar file " + file + ", skipping...", e); - } - - if (backup) { - backupSafely(file); - } - } - - /** - * Regenerates a tar file from a list of entries. - * - * @param entries - * @param file - * @throws IOException - */ - private static void generateTarFile(LinkedHashMap entries, - File file) throws IOException { - log.info("Regenerating tar file " + file); - TarWriter writer = new TarWriter(file); - for (Map.Entry entry : entries.entrySet()) { - UUID uuid = entry.getKey(); - byte[] data = entry.getValue(); - writer.writeEntry( - uuid.getMostSignificantBits(), - uuid.getLeastSignificantBits(), - data, 0, data.length); - } - writer.close(); - } - - /** - * Backup this tar file for manual inspection. Something went - * wrong earlier so we want to prevent the data from being - * accidentally removed or overwritten. - * - * @param file - * @throws IOException - */ - private static void backupSafely(File file) throws IOException { - File backup = findAvailGen(file, ".bak"); - log.info("Backing up " + file + " to " + backup.getName()); - if (!file.renameTo(backup)) { - log.warn("Renaming failed, so using copy to backup {}", file); - FileUtils.copyFile(file, backup); - if (!file.delete()) { - throw new IOException( - "Could not remove broken tar file " + file); - } - } - } - - /** - * Fine next available generation number so that a generated file doesn't - * overwrite another existing file. - * - * @param file - * @throws IOException - */ - private static File findAvailGen(File file, String ext) { - File parent = file.getParentFile(); - String name = file.getName(); - File backup = new File(parent, name + ext); - for (int i = 2; backup.exists(); i++) { - backup = new File(parent, name + "." + i + ext); - } - return backup; - } - - private static TarReader openFirstFileWithValidIndex(List files, boolean memoryMapping) { - for (File file : files) { - String name = file.getName(); - try { - RandomAccessFile access = new RandomAccessFile(file, "r"); - try { - ByteBuffer index = loadAndValidateIndex(access, name); - if (index == null) { - log.info("No index found in tar file {}, skipping...", name); - } else { - // found a file with a valid index, drop the others - for (File other : files) { - if (other != file) { - log.info("Removing unused tar file {}", other.getName()); - other.delete(); - } - } - - if (memoryMapping) { - try { - FileAccess mapped = new FileAccess.Mapped(access); - // re-read the index, now with memory mapping - int indexSize = index.remaining(); - index = mapped.read( - mapped.length() - indexSize - 16 - 1024, - indexSize); - return new TarReader(file, mapped, index); - } catch (IOException e) { - log.warn("Failed to mmap tar file " + name - + ". Falling back to normal file IO," - + " which will negatively impact" - + " repository performance. This" - + " problem may have been caused by" - + " restrictions on the amount of" - + " virtual memory available to the" - + " JVM. 
Please make sure that a" - + " 64-bit JVM is being used and" - + " that the process has access to" - + " unlimited virtual memory" - + " (ulimit option -v).", - e); - } - } - - FileAccess random = new FileAccess.Random(access); - // prevent the finally block from closing the file - // as the returned TarReader will take care of that - access = null; - return new TarReader(file, random, index); - } - } finally { - if (access != null) { - access.close(); - } - } - } catch (IOException e) { - log.warn("Could not read tar file " + name + ", skipping...", e); - } - } - - return null; - } - - /** - * Tries to read an existing index from the given tar file. The index is - * returned if it is found and looks valid (correct checksum, passes - * sanity checks). - * - * @param file tar file - * @param name name of the tar file, for logging purposes - * @return tar index, or {@code null} if not found or not valid - * @throws IOException if the tar file could not be read - */ - private static ByteBuffer loadAndValidateIndex( - RandomAccessFile file, String name) - throws IOException { - long length = file.length(); - if (length % BLOCK_SIZE != 0 - || length < 6 * BLOCK_SIZE - || length > Integer.MAX_VALUE) { - log.warn("Unexpected size {} of tar file {}", length, name); - return null; // unexpected file size - } - - // read the index metadata just before the two final zero blocks - ByteBuffer meta = ByteBuffer.allocate(16); - file.seek(length - 2 * BLOCK_SIZE - 16); - file.readFully(meta.array()); - int crc32 = meta.getInt(); - int count = meta.getInt(); - int bytes = meta.getInt(); - int magic = meta.getInt(); - - if (magic != INDEX_MAGIC) { - return null; // magic byte mismatch - } - - if (count < 1 || bytes < count * 24 + 16 || bytes % BLOCK_SIZE != 0) { - log.warn("Invalid index metadata in tar file {}", name); - return null; // impossible entry and/or byte counts - } - - // this involves seeking backwards in the file, which might not - // perform well, but that's OK since we only do this once per file - ByteBuffer index = ByteBuffer.allocate(count * 24); - file.seek(length - 2 * BLOCK_SIZE - 16 - count * 24); - file.readFully(index.array()); - index.mark(); - - CRC32 checksum = new CRC32(); - long limit = length - 2 * BLOCK_SIZE - bytes - BLOCK_SIZE; - long lastmsb = Long.MIN_VALUE; - long lastlsb = Long.MIN_VALUE; - byte[] entry = new byte[24]; - for (int i = 0; i < count; i++) { - index.get(entry); - checksum.update(entry); - - ByteBuffer buffer = ByteBuffer.wrap(entry); - long msb = buffer.getLong(); - long lsb = buffer.getLong(); - int offset = buffer.getInt(); - int size = buffer.getInt(); - - if (lastmsb > msb || (lastmsb == msb && lastlsb > lsb)) { - log.warn("Incorrect index ordering in tar file {}", name); - return null; - } else if (lastmsb == msb && lastlsb == lsb && i > 0) { - log.warn("Duplicate index entry in tar file {}", name); - return null; - } else if (offset < 0 || offset % BLOCK_SIZE != 0) { - log.warn("Invalid index entry offset in tar file {}", name); - return null; - } else if (size < 1 || offset + size > limit) { - log.warn("Invalid index entry size in tar file {}", name); - return null; - } - - lastmsb = msb; - lastlsb = lsb; - } - - if (crc32 != (int) checksum.getValue()) { - log.warn("Invalid index checksum in tar file {}", name); - return null; // checksum mismatch - } - - index.reset(); - return index; - } - - /** - * Scans through the tar file, looking for all segment entries. 
- * - * @throws IOException if the tar file could not be read - */ - private static void recoverEntries( - File file, RandomAccessFile access, - LinkedHashMap entries) throws IOException { - byte[] header = new byte[BLOCK_SIZE]; - while (access.getFilePointer() + BLOCK_SIZE <= access.length()) { - // read the tar header block - access.readFully(header); - - // compute the header checksum - int sum = 0; - for (int i = 0; i < BLOCK_SIZE; i++) { - sum += header[i] & 0xff; - } - - // identify possible zero block - if (sum == 0 && access.getFilePointer() + 2 * BLOCK_SIZE == access.length()) { - return; // found the zero blocks at the end of the file - } - - // replace the actual stored checksum with spaces for comparison - for (int i = 148; i < 148 + 8; i++) { - sum -= header[i] & 0xff; - sum += ' '; - } - - byte[] checkbytes = String.format("%06o\0 ", sum).getBytes(UTF_8); - for (int i = 0; i < checkbytes.length; i++) { - if (checkbytes[i] != header[148 + i]) { - log.warn("Invalid entry checksum at offset {} in tar file {}, skipping...", - access.getFilePointer() - BLOCK_SIZE, file); - } - } - - // The header checksum passes, so read the entry name and size - ByteBuffer buffer = ByteBuffer.wrap(header); - String name = readString(buffer, 100); - buffer.position(124); - int size = readNumber(buffer, 12); - if (access.getFilePointer() + size > access.length()) { - // checksum was correct, so the size field should be accurate - log.warn("Partial entry {} in tar file {}, ignoring...", name, file); - return; - } - - Matcher matcher = NAME_PATTERN.matcher(name); - if (matcher.matches()) { - UUID id = UUID.fromString(matcher.group(1)); - - String checksum = matcher.group(3); - if (checksum != null || !entries.containsKey(id)) { - byte[] data = new byte[size]; - access.readFully(data); - - // skip possible padding to stay at block boundaries - long position = access.getFilePointer(); - long remainder = position % BLOCK_SIZE; - if (remainder != 0) { - access.seek(position + (BLOCK_SIZE - remainder)); - } - - if (checksum != null) { - CRC32 crc = new CRC32(); - crc.update(data); - if (crc.getValue() != Long.parseLong(checksum, 16)) { - log.warn("Checksum mismatch in entry {} of tar file {}, skipping...", - name, file); - continue; - } - } - - entries.put(id, data); - } - } else if (!name.equals(file.getName() + ".idx")) { - log.warn("Unexpected entry {} in tar file {}, skipping...", - name, file); - long position = access.getFilePointer() + size; - long remainder = position % BLOCK_SIZE; - if (remainder != 0) { - position += BLOCK_SIZE - remainder; - } - access.seek(position); - } - } - } - - private final File file; - - private final FileAccess access; - - private final ByteBuffer index; - - private volatile boolean closed; - - private TarReader(File file, FileAccess access, ByteBuffer index) { - this.file = file; - this.access = access; - this.index = index; - } - - long size() { - return file.length(); - } - - /** - * Returns the number of segments in this tar file. - * - * @return number of segments - */ - int count() { - return index.capacity() / 24; - } - - /** - * Iterates over all entries in this tar file and calls - * {@link TarEntryVisitor#visit(long, long, File, int, int)} on them. 
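The header validation in recoverEntries above is the standard tar checksum rule: sum all bytes of the 512-byte header block with the 8-byte checksum field (offset 148) treated as ASCII spaces, then compare against the stored six-digit octal value. The same check extracted into a self-contained helper for readability (TarChecksumSketch and checksumMatches are hypothetical names; the deleted code only logs a warning on mismatch rather than returning a boolean):

    import static java.nio.charset.StandardCharsets.UTF_8;

    final class TarChecksumSketch {
        /** header must be one 512-byte tar header block. */
        static boolean checksumMatches(byte[] header) {
            int sum = 0;
            for (byte b : header) {
                sum += b & 0xff;
            }
            for (int i = 148; i < 148 + 8; i++) { // the checksum field counts as spaces
                sum -= header[i] & 0xff;
                sum += ' ';
            }
            byte[] expected = String.format("%06o\0 ", sum).getBytes(UTF_8);
            for (int i = 0; i < expected.length; i++) {
                if (expected[i] != header[148 + i]) {
                    return false;
                }
            }
            return true;
        }
    }
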
- * - * @param visitor entry visitor - */ - void accept(TarEntryVisitor visitor) { - int position = index.position(); - while (position < index.limit()) { - visitor.visit( - index.getLong(position), - index.getLong(position + 8), - file, - index.getInt(position + 16), - index.getInt(position + 20)); - position += 24; - } - } - - Set getUUIDs() { - Set uuids = newHashSetWithExpectedSize(index.remaining() / 24); - int position = index.position(); - while (position < index.limit()) { - uuids.add(new UUID( - index.getLong(position), - index.getLong(position + 8))); - position += 24; - } - return uuids; - } - - boolean containsEntry(long msb, long lsb) { - return findEntry(msb, lsb) != -1; - } - - /** - * If the given segment is in this file, get the byte buffer that allows - * reading it. - *
- * <p>
- * Whether or not this will read from the file depends on whether memory - * mapped files are used or not. - * - * @param msb the most significant bits of the segment id - * @param lsb the least significant bits of the segment id - * @return the byte buffer, or null if not in this file - */ - ByteBuffer readEntry(long msb, long lsb) throws IOException { - int position = findEntry(msb, lsb); - if (position != -1) { - return access.read( - index.getInt(position + 16), - index.getInt(position + 20)); - } else { - return null; - } - } - - /** - * Find the position of the given segment in the tar file. - * It uses the tar index if available. - * - * @param msb the most significant bits of the segment id - * @param lsb the least significant bits of the segment id - * @return the position in the file, or -1 if not found - */ - private int findEntry(long msb, long lsb) { - // The segment identifiers are randomly generated with uniform - // distribution, so we can use interpolation search to find the - // matching entry in the index. The average runtime is O(log log n). - - int lowIndex = 0; - int highIndex = index.remaining() / 24 - 1; - float lowValue = Long.MIN_VALUE; - float highValue = Long.MAX_VALUE; - float targetValue = msb; - - while (lowIndex <= highIndex) { - int guessIndex = lowIndex + Math.round( - (highIndex - lowIndex) - * (targetValue - lowValue) - / (highValue - lowValue)); - int position = index.position() + guessIndex * 24; - long m = index.getLong(position); - if (msb < m) { - highIndex = guessIndex - 1; - highValue = m; - } else if (msb > m) { - lowIndex = guessIndex + 1; - lowValue = m; - } else { - // getting close... - long l = index.getLong(position + 8); - if (lsb < l) { - highIndex = guessIndex - 1; - highValue = m; - } else if (lsb > l) { - lowIndex = guessIndex + 1; - lowValue = m; - } else { - // found it! 
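    // [Editor's sketch, not part of the patch] The interpolation search above,
    // extracted standalone: because segment ids are uniformly random, guessing
    // the next probe by linear interpolation between the known bounds takes
    // O(log log n) steps on average, versus O(log n) for plain binary search.
    // Names are illustrative.
    final class InterpolationSearchSketch {
        static int indexOf(long[] sorted, long target) {
            int lo = 0, hi = sorted.length - 1;
            float loVal = Long.MIN_VALUE, hiVal = Long.MAX_VALUE;
            while (lo <= hi) {
                int guess = lo + Math.round(
                        (hi - lo) * (target - loVal) / (hiVal - loVal));
                long v = sorted[guess];
                if (target < v) { hi = guess - 1; hiVal = v; }
                else if (target > v) { lo = guess + 1; loVal = v; }
                else return guess; // found it
            }
            return -1; // not found
        }
    }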
- return position; - } - } - } - - // not found - return -1; - } - - @Nonnull - private TarEntry[] getEntries() { - TarEntry[] entries = new TarEntry[index.remaining() / 24]; - int position = index.position(); - for (int i = 0; position < index.limit(); i++) { - entries[i] = new TarEntry( - index.getLong(position), - index.getLong(position + 8), - index.getInt(position + 16), - index.getInt(position + 20)); - position += 24; - } - Arrays.sort(entries, TarEntry.OFFSET_ORDER); - return entries; - } - - @CheckForNull - private List getReferences(TarEntry entry, UUID id, Map> graph) throws IOException { - if (graph != null) { - return graph.get(id); - } else { - // a pre-compiled graph is not available, so read the - // references directly from this segment - ByteBuffer segment = access.read( - entry.offset(), - Math.min(entry.size(), 16 * 256)); - int pos = segment.position(); - int refCount = segment.get(pos + REF_COUNT_OFFSET) & 0xff; - int refEnd = pos + 16 * (refCount + 1); - List refIds = newArrayList(); - for (int refPos = pos + 16; refPos < refEnd; refPos += 16) { - refIds.add(new UUID( - segment.getLong(refPos), - segment.getLong(refPos + 8))); - } - return refIds; - } - } - - /** - * Build the graph of segments reachable from an initial set of segments - * @param roots the initial set of segments - * @param visitor visitor receiving call back while following the segment graph - * @throws IOException - */ - public void traverseSegmentGraph( - @Nonnull Set roots, - @Nonnull SegmentGraphVisitor visitor) throws IOException { - checkNotNull(roots); - checkNotNull(visitor); - Map> graph = getGraph(); - - TarEntry[] entries = getEntries(); - for (int i = entries.length - 1; i >= 0; i--) { - TarEntry entry = entries[i]; - UUID id = new UUID(entry.msb(), entry.lsb()); - if (roots.remove(id) && isDataSegmentId(entry.lsb())) { - // this is a referenced data segment, so follow the graph - List refIds = getReferences(entry, id, graph); - if (refIds != null) { - for (UUID refId : refIds) { - visitor.accept(id, refId); - roots.add(refId); - } - } else { - visitor.accept(id, null); - } - } else { - // this segment is not referenced anywhere - visitor.accept(id, null); - } - } - } - - /** - * Calculate the ids of the segments directly referenced from {@code referenceIds} - * through forward references. - * - * @param referencedIds The initial set of ids to start from. On return it - * contains the set of direct forward references. - * - * @throws IOException - */ - void calculateForwardReferences(Set referencedIds) throws IOException { - Map> graph = getGraph(); - TarEntry[] entries = getEntries(); - for (int i = entries.length - 1; i >= 0; i--) { - TarEntry entry = entries[i]; - UUID id = new UUID(entry.msb(), entry.lsb()); - if (referencedIds.remove(id)) { - if (isDataSegmentId(entry.lsb())) { - // this is a referenced data segment, so follow the graph - List refIds = getReferences(entry, id, graph); - if (refIds != null) { - referencedIds.addAll(refIds); - } - } - } - } - } - - /** - * Garbage collects segments in this file. First it collects the set of - * segments that are referenced / reachable, then (if more than 25% is - * garbage) creates a new generation of the file. - *
- * <p>
- * The old generation files are not removed (they can't easily be removed, - * for memory mapped files). - * - * @param referencedIds the referenced segment ids (input and output). - * @param removed a set which will receive the uuids of all segments that - * have been cleaned. - * @return this (if the file is kept as is), or the new generation file, or - * null if the file is fully garbage - */ - synchronized TarReader cleanup(Set referencedIds, Set removed) throws IOException { - String name = file.getName(); - log.debug("Cleaning up {}", name); - - Set cleaned = newHashSet(); - Map> graph = getGraph(); - TarEntry[] entries = getEntries(); - - int size = 0; - int count = 0; - for (int i = entries.length - 1; i >= 0; i--) { - TarEntry entry = entries[i]; - UUID id = new UUID(entry.msb(), entry.lsb()); - if (!referencedIds.remove(id)) { - // this segment is not referenced anywhere - cleaned.add(id); - entries[i] = null; - } else { - size += getEntrySize(entry.size()); - count += 1; - if (isDataSegmentId(entry.lsb())) { - // this is a referenced data segment, so follow the graph - List refIds = getReferences(entry, id, graph); - if (refIds != null) { - referencedIds.addAll(refIds); - } - } - } - } - size += getEntrySize(24 * count + 16); - size += 2 * BLOCK_SIZE; - - if (count == 0) { - log.debug("None of the entries of {} are referenceable.", name); - removed.addAll(cleaned); - logCleanedSegments(cleaned); - return null; - } else if (size >= access.length() * 3 / 4 && graph != null) { - // the space savings are not worth it at less than 25%, - // unless this tar file lacks a pre-compiled segment graph - // in which case we'll always generate a new tar file with - // the graph to speed up future garbage collection runs. - log.debug("Not enough space savings. ({}/{}). 
Skipping clean up of {}", - access.length() - size, access.length(), name); - return this; - } - - int pos = name.length() - "a.tar".length(); - char generation = name.charAt(pos); - if (generation == 'z') { - log.debug("No garbage collection after reaching generation z: {}", name); - return this; - } - - File newFile = new File( - file.getParentFile(), - name.substring(0, pos) + (char) (generation + 1) + ".tar"); - - log.debug("Writing new generation {}", newFile.getName()); - TarWriter writer = new TarWriter(newFile); - for (TarEntry entry : entries) { - if (entry != null) { - byte[] data = new byte[entry.size()]; - access.read(entry.offset(), entry.size()).get(data); - writer.writeEntry( - entry.msb(), entry.lsb(), data, 0, entry.size()); - } - } - writer.close(); - - TarReader reader = openFirstFileWithValidIndex( - singletonList(newFile), access.isMemoryMapped()); - if (reader != null) { - logCleanedSegments(cleaned); - removed.addAll(cleaned); - return reader; - } else { - log.warn("Failed to open cleaned up tar file {}", file); - return this; - } - } - - // FIXME OAK-4165: Too verbose logging during revision gc - private void logCleanedSegments(Set cleaned) { - StringBuilder uuids = new StringBuilder(); - String newLine = System.getProperty("line.separator", "\n") + " "; - - int c = 0; - String sep = ""; - for (UUID uuid : cleaned) { - uuids.append(sep); - if (c++ % 4 == 0) { - uuids.append(newLine); - } - uuids.append(uuid); - sep = ", "; - } - - GC_LOG.info("TarMK cleaned segments from {}: {}", file.getName(), uuids); - } - - /** - * @return {@code true} iff this reader has been closed - * @see #close() - */ - boolean isClosed() { - return closed; - } - - @Override - public void close() throws IOException { - closed = true; - access.close(); - } - - //-----------------------------------------------------------< private >-- - - /** - * Loads and parses the optional pre-compiled graph entry from the given tar - * file. - * - * @return the parsed graph, or {@code null} if one was not found - * @throws IOException if the tar file could not be read - */ - Map> getGraph() throws IOException { - ByteBuffer graph = loadGraph(); - if (graph == null) { - return null; - } else { - return parseGraph(graph); - } - } - - /** - * Loads the optional pre-compiled graph entry from the given tar file. 
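(Editor's sketch, not part of the patch: the generation naming used by cleanup() above, restated standalone. Each cleanup pass bumps the single generation character before the ".tar" suffix, and collection stops once generation 'z' is reached. The example file name in the comment is illustrative.)

    import java.io.File;

    final class GenerationSketch {
        // Returns the next-generation file, or null once 'z' is reached.
        static File nextGeneration(File file) {
            String name = file.getName(); // e.g. "data00000a.tar"
            int pos = name.length() - "a.tar".length();
            char generation = name.charAt(pos);
            if (generation == 'z') {
                return null;
            }
            return new File(file.getParentFile(),
                    name.substring(0, pos) + (char) (generation + 1) + ".tar");
        }
    }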
- * - * @return graph buffer, or {@code null} if one was not found - * @throws IOException if the tar file could not be read - */ - private ByteBuffer loadGraph() throws IOException { - // read the graph metadata just before the tar index entry - int pos = access.length() - 2 * BLOCK_SIZE - getEntrySize(index.remaining()); - ByteBuffer meta = access.read(pos - 16, 16); - int crc32 = meta.getInt(); - int count = meta.getInt(); - int bytes = meta.getInt(); - int magic = meta.getInt(); - - if (magic != GRAPH_MAGIC) { - return null; // magic byte mismatch - } - - if (count < 0 || bytes < count * 16 + 16 || BLOCK_SIZE + bytes > pos) { - log.warn("Invalid graph metadata in tar file {}", file); - return null; // impossible uuid and/or byte counts - } - - // this involves seeking backwards in the file, which might not - // perform well, but that's OK since we only do this once per file - ByteBuffer graph = access.read(pos - bytes, bytes); - - byte[] b = new byte[bytes - 16]; - graph.mark(); - graph.get(b); - graph.reset(); - - CRC32 checksum = new CRC32(); - checksum.update(b); - if (crc32 != (int) checksum.getValue()) { - log.warn("Invalid graph checksum in tar file {}", file); - return null; // checksum mismatch - } - - return graph; - } - - private static Map> parseGraph(ByteBuffer graphByteBuffer) { - int count = graphByteBuffer.getInt(graphByteBuffer.limit() - 12); - - ByteBuffer buffer = graphByteBuffer.duplicate(); - buffer.limit(graphByteBuffer.limit() - 16); - - List uuids = newArrayListWithCapacity(count); - for (int i = 0; i < count; i++) { - uuids.add(new UUID(buffer.getLong(), buffer.getLong())); - } - - Map> graph = newHashMap(); - while (buffer.hasRemaining()) { - UUID uuid = uuids.get(buffer.getInt()); - List list = newArrayList(); - int refid = buffer.getInt(); - while (refid != -1) { - list.add(uuids.get(refid)); - refid = buffer.getInt(); - } - graph.put(uuid, list); - } - return graph; - } - - private static String readString(ByteBuffer buffer, int fieldSize) { - byte[] b = new byte[fieldSize]; - buffer.get(b); - int n = 0; - while (n < fieldSize && b[n] != 0) { - n++; - } - return new String(b, 0, n, UTF_8); - } - - private static int readNumber(ByteBuffer buffer, int fieldSize) { - byte[] b = new byte[fieldSize]; - buffer.get(b); - int number = 0; - for (int i = 0; i < fieldSize; i++) { - int digit = b[i] & 0xff; - if ('0' <= digit && digit <= '7') { - number = number * 8 + digit - '0'; - } else { - break; - } - } - return number; - } - - File getFile() { - return file; - } - - //------------------------------------------------------------< Object >-- - - @Override - public String toString() { - return file.toString(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriter.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriter.java deleted file mode 100644 index b00400f..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriter.java +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Charsets.UTF_8; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkPositionIndexes; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Lists.reverse; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Maps.newLinkedHashMap; -import static com.google.common.collect.Maps.newTreeMap; -import static com.google.common.collect.Sets.newHashSet; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.REF_COUNT_OFFSET; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentId.isDataSegmentId; - -import java.io.Closeable; -import java.io.File; -import java.io.FileDescriptor; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedMap; -import java.util.UUID; -import java.util.zip.CRC32; - -import com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A writer for tar files. It is also used to read entries while the file is - * still open. - */ -class TarWriter implements Closeable { - - /** Logger instance */ - private static final Logger log = LoggerFactory.getLogger(TarWriter.class); - - /** - * Magic byte sequence at the end of the index block. - *
- * <p> - * <ul> - * <li>For each segment in that file, an index entry that contains the UUID, - * the offset within the file and the size of the segment. Sorted by UUID, - * to allow using interpolation search.</li> - * <li>The index footer, which contains metadata of the index (the size, - * checksum).</li> - * </ul>
- */ - static final int INDEX_MAGIC = - ('\n' << 24) + ('0' << 16) + ('K' << 8) + '\n'; - - /** - * Magic byte sequence at the end of the graph block. - *
- * <p> - * The file is read from the end (the tar file is read from the end: the - * last entry is the index, then the graph). File format (see the editor's - * sketch after this list): - * <ul> - * <li>0 padding to make the footer end at a 512 byte boundary</li> - * <li>The list of UUIDs (segments included in the graph; this includes - * segments in this tar file, and referenced segments in tar files with a - * lower sequence number). 16 bytes each.</li> - * <li>The graph data. The index of the source segment UUID (in the above - * list, 4 bytes), then the list of referenced segments (the indexes of - * those; 4 bytes each). Then the list is terminated by -1.</li> - * <li>The last part is the footer, which contains metadata of the graph - * (size, checksum, the number of UUIDs).</li> - * </ul>
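(Editor's sketch, not part of the patch: the graph entry layout just described, mirroring what writeGraph() below serializes and what parseGraph() in TarReader consumes — a UUID table, adjacency lists of table indexes terminated by -1, and a 16-byte footer of crc32, UUID count, size and GRAPH_MAGIC. All class and variable names are illustrative; the graph must only reference UUIDs present in the table.)

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.UUID;
    import java.util.zip.CRC32;

    final class GraphLayoutSketch {
        static final int GRAPH_MAGIC = ('\n' << 24) + ('0' << 16) + ('G' << 8) + '\n';

        static ByteBuffer write(List<UUID> uuids, Map<UUID, List<UUID>> graph) {
            int size = uuids.size() * 16 + 16; // UUID table plus footer
            for (List<UUID> refs : graph.values()) {
                size += 4 + refs.size() * 4 + 4; // source, targets, terminator
            }
            ByteBuffer buffer = ByteBuffer.allocate(size);
            Map<UUID, Integer> refmap = new HashMap<>();
            int index = 0;
            for (UUID uuid : uuids) { // the UUID table, 16 bytes each
                buffer.putLong(uuid.getMostSignificantBits());
                buffer.putLong(uuid.getLeastSignificantBits());
                refmap.put(uuid, index++);
            }
            for (Map.Entry<UUID, List<UUID>> e : graph.entrySet()) {
                buffer.putInt(refmap.get(e.getKey())); // source segment index
                for (UUID ref : e.getValue()) {
                    buffer.putInt(refmap.get(ref));    // referenced indexes
                }
                buffer.putInt(-1);                     // list terminator
            }
            CRC32 checksum = new CRC32();
            checksum.update(buffer.array(), 0, buffer.position());
            buffer.putInt((int) checksum.getValue()); // crc32
            buffer.putInt(uuids.size());              // number of UUIDs
            buffer.putInt(size);                      // graph size in bytes
            buffer.putInt(GRAPH_MAGIC);               // magic
            buffer.flip();
            return buffer;
        }
    }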
- * - */ - static final int GRAPH_MAGIC = - ('\n' << 24) + ('0' << 16) + ('G' << 8) + '\n'; - - /** The tar file block size. */ - static final int BLOCK_SIZE = 512; - - private static final byte[] ZERO_BYTES = new byte[BLOCK_SIZE]; - - static final int getPaddingSize(int size) { - int remainder = size % BLOCK_SIZE; - if (remainder > 0) { - return BLOCK_SIZE - remainder; - } else { - return 0; - } - } - - /** - * The file being written. This instance is also used as an additional - * synchronization point by {@link #flush()} and {@link #close()} to - * allow {@link #flush()} to work concurrently with normal reads and - * writes, but not with a concurrent {@link #close()}. - */ - private final File file; - - private final FileStoreMonitor monitor; - - /** - * File handle. Initialized lazily in - * {@link #writeEntry(long, long, byte[], int, int)} to avoid creating - * an extra empty file when just reading from the repository. - * Should only be accessed from synchronized code. - */ - private RandomAccessFile access = null; - - private FileChannel channel = null; - - /** - * Flag to indicate a closed writer. Accessing a closed writer is illegal. - * Should only be accessed from synchronized code. - */ - private boolean closed = false; - - /** - * Map of the entries that have already been written. Used by the - * {@link #containsEntry(long, long)} and {@link #readEntry(long, long)} - * methods to retrieve data from this file while it's still being written, - * and finally by the {@link #close()} method to generate the tar index. - * The map is ordered in the order that entries have been written. - *
- * <p>
- * Should only be accessed from synchronized code. - */ - private final Map index = newLinkedHashMap(); - - private final Set references = newHashSet(); - - /** - * Segment graph of the entries that have already been written. - */ - private final SortedMap> graph = newTreeMap(); - - TarWriter(File file) { - this(file, FileStoreMonitor.DEFAULT); - } - - TarWriter(File file, FileStoreMonitor monitor) { - this.file = file; - this.monitor = monitor; - } - - /** - * Returns the number of segments written so far to this tar file. - * - * @return number of segments written so far - */ - synchronized int count() { - return index.size(); - } - - synchronized Set getUUIDs() { - return newHashSet(index.keySet()); - } - - synchronized boolean containsEntry(long msb, long lsb) { - checkState(!closed); - return index.containsKey(new UUID(msb, lsb)); - } - - /** - * If the given segment is in this file, get the byte buffer that allows - * reading it. - * - * @param msb the most significant bits of the segment id - * @param lsb the least significant bits of the segment id - * @return the byte buffer, or null if not in this file - */ - ByteBuffer readEntry(long msb, long lsb) throws IOException { - checkState(!closed); - - TarEntry entry; - synchronized (this) { - entry = index.get(new UUID(msb, lsb)); - } - if (entry != null) { - checkState(channel != null); // implied by entry != null - ByteBuffer data = ByteBuffer.allocate(entry.size()); - channel.read(data, entry.offset()); - data.rewind(); - return data; - } else { - return null; - } - } - - long writeEntry( - long msb, long lsb, byte[] data, int offset, int size) - throws IOException { - checkNotNull(data); - checkPositionIndexes(offset, offset + size, data.length); - - UUID uuid = new UUID(msb, lsb); - CRC32 checksum = new CRC32(); - checksum.update(data, offset, size); - String entryName = String.format("%s.%08x", uuid, checksum.getValue()); - byte[] header = newEntryHeader(entryName, size); - - log.debug("Writing segment {} to {}", uuid, file); - return writeEntry(uuid, header, data, offset, size); - } - - private synchronized long writeEntry( - UUID uuid, byte[] header, byte[] data, int offset, int size) - throws IOException { - checkState(!closed); - if (access == null) { - access = new RandomAccessFile(file, "rw"); - channel = access.getChannel(); - } - - long initialLength = access.getFilePointer(); - access.write(header); - access.write(data, offset, size); - int padding = getPaddingSize(size); - if (padding > 0) { - access.write(ZERO_BYTES, 0, padding); - } - - long currentLength = access.getFilePointer(); - checkState(currentLength <= Integer.MAX_VALUE); - TarEntry entry = new TarEntry( - uuid.getMostSignificantBits(), uuid.getLeastSignificantBits(), - (int) (currentLength - size - padding), size); - index.put(uuid, entry); - - if (isDataSegmentId(uuid.getLeastSignificantBits())) { - ByteBuffer segment = ByteBuffer.wrap(data, offset, size); - int pos = segment.position(); - int refcount = segment.get(pos + REF_COUNT_OFFSET) & 0xff; - if (refcount != 0) { - int refend = pos + 16 * (refcount + 1); - List list = Lists.newArrayListWithCapacity(refcount); - for (int refpos = pos + 16; refpos < refend; refpos += 16) { - UUID refid = new UUID( - segment.getLong(refpos), - segment.getLong(refpos + 8)); - if (!index.containsKey(refid)) { - references.add(refid); - } - list.add(refid); - } - Collections.sort(list); - graph.put(uuid, list); - } - } - - monitor.written(currentLength - initialLength); - return currentLength; - } - - /** - * Flushes the 
entries that have so far been written to the disk. - * This method is not synchronized to allow concurrent reads - * and writes to proceed while the file is being flushed. However, - * this method is carefully synchronized with {@link #close()} - * to prevent accidental flushing of an already closed file. - * - * @throws IOException if the tar file could not be flushed - */ - void flush() throws IOException { - synchronized (file) { - FileDescriptor descriptor = null; - - synchronized (this) { - if (access != null && !closed) { - descriptor = access.getFD(); - } - } - - if (descriptor != null) { - descriptor.sync(); - } - } - } - - boolean isDirty() { - return access != null; - } - - /** - * Closes this tar file. - * - * @throws IOException if the tar file could not be closed - */ - @Override - public void close() throws IOException { - // Mark this writer as closed. Note that we only need to synchronize - // this part, as no other synchronized methods should get invoked - // once close() has been initiated (see related checkState calls). - synchronized (this) { - checkState(!closed); - closed = true; - } - - // If nothing was written to this file, then we're already done. - if (access == null) { - return; - } - - // Complete the tar file by adding the graph, the index and the - // trailing two zero blocks. This code is synchronized on the file - // instance to ensure that no concurrent thread is still flushing - // the file when we close the file handle. - long initialPosition, currentPosition; - synchronized (file) { - initialPosition = access.getFilePointer(); - writeGraph(); - writeIndex(); - access.write(ZERO_BYTES); - access.write(ZERO_BYTES); - - currentPosition = access.getFilePointer(); - access.close(); - } - - monitor.written(currentPosition - initialPosition); - } - - private void writeGraph() throws IOException { - List uuids = Lists.newArrayListWithCapacity( - index.size() + references.size()); - uuids.addAll(index.keySet()); - uuids.addAll(references); - Collections.sort(uuids); - - int graphSize = uuids.size() * 16 + 16; - for (List list : graph.values()) { - graphSize += 4 + list.size() * 4 + 4; - } - int padding = getPaddingSize(graphSize); - - String graphName = file.getName() + ".gph"; - byte[] header = newEntryHeader(graphName, graphSize + padding); - - ByteBuffer buffer = ByteBuffer.allocate(graphSize); - - Map refmap = newHashMap(); - - int index = 0; - for (UUID uuid : uuids) { - buffer.putLong(uuid.getMostSignificantBits()); - buffer.putLong(uuid.getLeastSignificantBits()); - refmap.put(uuid, index++); - } - - for (Map.Entry> entry : graph.entrySet()) { - buffer.putInt(refmap.get(entry.getKey())); - for (UUID refid : entry.getValue()) { - buffer.putInt(refmap.get(refid)); - } - buffer.putInt(-1); - } - - CRC32 checksum = new CRC32(); - checksum.update(buffer.array(), 0, buffer.position()); - buffer.putInt((int) checksum.getValue()); - buffer.putInt(uuids.size()); - buffer.putInt(graphSize); - buffer.putInt(GRAPH_MAGIC); - - access.write(header); - if (padding > 0) { - // padding comes *before* the graph! 
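[Editor's note: the padding is written before the payload so that the 16-byte footer ends flush against the final 512-byte block boundary; TarReader.loadGraph() and loadAndValidateIndex() locate these entries by seeking a fixed distance back from the end of the file, so the footer, not the padding, must come last.]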
- access.write(ZERO_BYTES, 0, padding); - } - access.write(buffer.array()); - } - - private void writeIndex() throws IOException { - int indexSize = index.size() * 24 + 16; - int padding = getPaddingSize(indexSize); - - String indexName = file.getName() + ".idx"; - byte[] header = newEntryHeader(indexName, indexSize + padding); - - ByteBuffer buffer = ByteBuffer.allocate(indexSize); - TarEntry[] sorted = index.values().toArray(new TarEntry[index.size()]); - Arrays.sort(sorted, TarEntry.IDENTIFIER_ORDER); - for (TarEntry entry : sorted) { - buffer.putLong(entry.msb()); - buffer.putLong(entry.lsb()); - buffer.putInt(entry.offset()); - buffer.putInt(entry.size()); - } - - CRC32 checksum = new CRC32(); - checksum.update(buffer.array(), 0, buffer.position()); - buffer.putInt((int) checksum.getValue()); - buffer.putInt(index.size()); - buffer.putInt(padding + indexSize); - buffer.putInt(INDEX_MAGIC); - - access.write(header); - if (padding > 0) { - // padding comes *before* the index! - access.write(ZERO_BYTES, 0, padding); - } - access.write(buffer.array()); - } - - private static byte[] newEntryHeader(String name, int size) { - byte[] header = new byte[BLOCK_SIZE]; - - // File name - byte[] nameBytes = name.getBytes(UTF_8); - System.arraycopy( - nameBytes, 0, header, 0, Math.min(nameBytes.length, 100)); - - // File mode - System.arraycopy( - String.format("%07o", 0400).getBytes(UTF_8), 0, - header, 100, 7); - - // User's numeric user ID - System.arraycopy( - String.format("%07o", 0).getBytes(UTF_8), 0, - header, 108, 7); - - // Group's numeric user ID - System.arraycopy( - String.format("%07o", 0).getBytes(UTF_8), 0, - header, 116, 7); - - // File size in bytes (octal basis) - System.arraycopy( - String.format("%011o", size).getBytes(UTF_8), 0, - header, 124, 11); - - // Last modification time in numeric Unix time format (octal) - long time = System.currentTimeMillis() / 1000; - System.arraycopy( - String.format("%011o", time).getBytes(UTF_8), 0, - header, 136, 11); - - // Checksum for header record - System.arraycopy( - new byte[] {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '}, 0, - header, 148, 8); - - // Type flag - header[156] = '0'; - - // Compute checksum - int checksum = 0; - for (int i = 0; i < header.length; i++) { - checksum += header[i] & 0xff; - } - System.arraycopy( - String.format("%06o\0 ", checksum).getBytes(UTF_8), 0, - header, 148, 8); - - return header; - } - - /** - * Add all segment ids that are reachable from {@code referencedIds} via - * this writer's segment graph and subsequently remove those segment ids - * from {@code referencedIds} that are in this {@code TarWriter}. The - * latter can't be cleaned up anyway because they are not be present in - * any of the readers. 
- * - * @param referencedIds - * @throws IOException - */ - synchronized void collectReferences(Set referencedIds) { - for (UUID uuid : reverse(newArrayList(index.keySet()))) { - if (referencedIds.remove(uuid)) { - List refs = graph.get(uuid); - if (refs != null) { - referencedIds.addAll(refs); - } - } - } - } - - //------------------------------------------------------------< Object >-- - - @Override - public String toString() { - return file.toString(); - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/BasicReadOnlyBlobStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/BasicReadOnlyBlobStore.java deleted file mode 100644 index 1c6593e..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/BasicReadOnlyBlobStore.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file.tooling; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; - -import org.apache.jackrabbit.oak.spi.blob.BlobOptions; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -/** - * Utility BlobStore implementation to be used in tooling that can work with a - * FileStore without the need of the DataStore being present locally - */ -@Deprecated -public class BasicReadOnlyBlobStore implements BlobStore { - - @Override - @Deprecated - public String writeBlob(InputStream in) throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * Ignores the options provided and delegates to {@link #writeBlob(InputStream)}. 
- * - * @param in the input stream to write - * @param options the options to use - * @return - * @throws IOException - */ - @Override - @Deprecated - public String writeBlob(InputStream in, BlobOptions options) throws IOException { - return writeBlob(in); - } - - @Override - @Deprecated - public int readBlob(String blobId, long pos, byte[] buff, int off, - int length) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public long getBlobLength(String blobId) throws IOException { - // best effort length extraction - int indexOfSep = blobId.lastIndexOf("#"); - if (indexOfSep != -1) { - return Long.valueOf(blobId.substring(indexOfSep + 1)); - } - return -1; - } - - @Override - @Deprecated - public InputStream getInputStream(String blobId) throws IOException { - return new ByteArrayInputStream(new byte[0]); - } - - @Override - @Deprecated - public String getBlobId(String reference) { - return reference; - } - - @Override - @Deprecated - public String getReference(String blobId) { - return blobId; - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/ConsistencyChecker.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/ConsistencyChecker.java deleted file mode 100644 index 96b9677..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/ConsistencyChecker.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file.tooling; - -import static com.google.common.collect.Sets.newHashSet; -import static java.lang.Math.min; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.commons.PathUtils.concat; -import static org.apache.jackrabbit.oak.commons.PathUtils.denotesRoot; -import static org.apache.jackrabbit.oak.commons.PathUtils.getName; -import static org.apache.jackrabbit.oak.commons.PathUtils.getParentPath; -import static org.apache.jackrabbit.oak.spi.state.NodeStateUtils.getNode; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.util.Set; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.plugins.segment.file.JournalReader; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Utility for checking the files of a - * {@link org.apache.jackrabbit.oak.plugins.segment.file.FileStore} for inconsistency and - * reporting that latest consistent revision. - */ -@Deprecated -public class ConsistencyChecker { - private static final Logger LOG = LoggerFactory.getLogger(ConsistencyChecker.class); - - private final ReadOnlyStore store; - private final long debugInterval; - - /** - * Run a consistency check. - * - * @param directory directory containing the tar files - * @param journalFileName name of the journal file containing the revision history - * @param fullTraversal full traversal consistency check if {@code true}. Only try - * to access the root node otherwise. - * @param debugInterval number of seconds between printing progress information to - * the console during the full traversal phase. - * @param binLen number of bytes to read from binary properties. -1 for all. - * @return the latest consistent revision out of the revisions listed in the journal. 
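(Editor's sketch, not part of the patch: a hypothetical invocation of the checker removed here. The directory and journal file name are assumptions; the call signature matches checkConsistency() above.)

    import java.io.File;
    import org.apache.jackrabbit.oak.plugins.segment.file.tooling.ConsistencyChecker;

    final class CheckerUsageSketch {
        public static void main(String[] args) throws Exception {
            String lastGood = ConsistencyChecker.checkConsistency(
                    new File("repository/segmentstore"), // directory with the tar files
                    "journal.log",                       // revision history file name
                    true,                                // full traversal check
                    60,                                  // progress interval, seconds
                    -1);                                 // read binary properties fully
            System.out.println("Latest good revision: " + lastGood);
        }
    }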
- * @throws IOException - */ - @Deprecated - public static String checkConsistency(File directory, String journalFileName, boolean fullTraversal, long debugInterval, long binLen) - throws IOException, InvalidFileStoreVersionException { - print("Searching for last good revision in {}", journalFileName); - JournalReader journal = new JournalReader(new File(directory, journalFileName)); - Set badPaths = newHashSet(); - ConsistencyChecker checker = new ConsistencyChecker(directory, debugInterval); - try { - int revisionCount = 0; - for (String revision : journal) { - try { - print("Checking revision {}", revision); - revisionCount++; - String badPath = checker.check(revision, badPaths, binLen); - if (badPath == null && fullTraversal) { - badPath = checker.traverse(revision, binLen); - } - if (badPath == null) { - print("Found latest good revision {}", revision); - print("Searched through {} revisions", revisionCount); - return revision; - } else { - badPaths.add(badPath); - print("Broken revision {}", revision); - } - } catch (IllegalArgumentException e) { - print("Skipping invalid record id {}", revision); - } - } - } finally { - checker.close(); - journal.close(); - } - - print("No good revision found"); - return null; - } - - /** - * Create a new consistency checker instance - * - * @param directory directory containing the tar files - * @param debugInterval number of seconds between printing progress information to - * the console during the full traversal phase. - * @throws IOException - */ - @Deprecated - public ConsistencyChecker(File directory, long debugInterval) throws IOException, InvalidFileStoreVersionException { - store = FileStore.builder(directory).buildReadOnly(); - this.debugInterval = debugInterval; - } - - /** - * Check whether the nodes and all its properties of all given - * {@code paths} are consistent at the given {@code revision}. - * - * @param revision revision to check - * @param paths paths to check - * @param binLen number of bytes to read from binary properties. -1 for all. - * @return Path of the first inconsistency detected or {@code null} if none. - */ - @Deprecated - public String check(String revision, Set paths, long binLen) { - store.setRevision(revision); - for (String path : paths) { - String err = checkPath(path, binLen); - if (err != null) { - return err; - } - } - return null; - } - - private String checkPath(String path, long binLen) { - try { - print("Checking {}", path); - NodeState root = SegmentNodeStore.builder(store).build().getRoot(); - String parentPath = getParentPath(path); - String name = getName(path); - NodeState parent = getNode(root, parentPath); - if (!denotesRoot(path) && parent.hasChildNode(name)) { - return traverse(parent.getChildNode(name), path, false, binLen); - } else { - return traverse(parent, parentPath, false, binLen); - } - } catch (RuntimeException e) { - print("Error while checking {}: {}", path, e.getMessage()); - return path; - } - } - - private int nodeCount; - private int propertyCount; - - /** - * Travers the given {@code revision} - * @param revision revision to travers - * @param binLen number of bytes to read from binary properties. -1 for all. 
- */ - @Deprecated - public String traverse(String revision, long binLen) { - try { - store.setRevision(revision); - nodeCount = 0; - propertyCount = 0; - String result = traverse(SegmentNodeStore.builder(store).build().getRoot(), "/", true, binLen); - print("Traversed {} nodes and {} properties", nodeCount, propertyCount); - return result; - } catch (RuntimeException e) { - print("Error while traversing {}", revision, e.getMessage()); - return "/"; - } - } - - private String traverse(NodeState node, String path, boolean deep, long binLen) { - try { - debug("Traversing {}", path); - nodeCount++; - for (PropertyState propertyState : node.getProperties()) { - debug("Checking {}/{}", path, propertyState); - Type type = propertyState.getType(); - if (type == BINARY) { - traverse(propertyState.getValue(BINARY), binLen); - } else if (type == BINARIES) { - for (Blob blob : propertyState.getValue(BINARIES)) { - traverse(blob, binLen); - } - } else { - propertyState.getValue(type); - } - propertyCount++; - } - for (ChildNodeEntry cne : node.getChildNodeEntries()) { - String childName = cne.getName(); - NodeState child = cne.getNodeState(); - if (deep) { - String result = traverse(child, concat(path, childName), true, binLen); - if (result != null) { - return result; - } - } - } - return null; - } catch (RuntimeException e) { - print("Error while traversing {}: {}", path, e.getMessage()); - return path; - } catch (IOException e) { - print("Error while traversing {}: {}", path, e.getMessage()); - return path; - } - } - - private static void traverse(Blob blob, long length) throws IOException { - if (length < 0) { - length = Long.MAX_VALUE; - } - if (length > 0 && !isExternal(blob)) { - InputStream s = blob.getNewStream(); - try { - byte[] buffer = new byte[8192]; - int l = s.read(buffer, 0, (int) min(buffer.length, length)); - while (l >= 0 && (length -= l) > 0) { - l = s.read(buffer, 0, (int) min(buffer.length, length)); - } - } finally { - s.close(); - } - } - } - - private static boolean isExternal(Blob b) { - if (b instanceof SegmentBlob) { - return ((SegmentBlob) b).isExternal(); - } - return false; - } - - @Deprecated - public void close() { - store.close(); - } - - private static void print(String format) { - LOG.info(format); - } - - private static void print(String format, Object arg) { - LOG.info(format, arg); - } - - private static void print(String format, Object arg1, Object arg2) { - LOG.info(format, arg1, arg2); - } - - private long ts; - - private void debug(String format, Object arg) { - if (debug()) { - LOG.debug(format, arg); - } - } - - private void debug(String format, Object arg1, Object arg2) { - if (debug()) { - LOG.debug(format, arg1, arg2); - } - } - - private boolean debug() { - // Avoid calling System.currentTimeMillis(), which is slow on some systems. 
- if (debugInterval == Long.MAX_VALUE) { - return false; - } else if (debugInterval == 0) { - return true; - } - - long ts = System.currentTimeMillis(); - if ((ts - this.ts) / 1000 > debugInterval) { - this.ts = ts; - return true; - } else { - return false; - } - } - - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/RevisionHistory.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/RevisionHistory.java deleted file mode 100644 index 513364b..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/file/tooling/RevisionHistory.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file.tooling; - -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.collect.Iterables.transform; -import static org.apache.jackrabbit.oak.commons.PathUtils.elements; -import static org.apache.jackrabbit.oak.json.JsonSerializer.DEFAULT_FILTER_EXPRESSION; - -import java.io.File; -import java.io.IOException; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import com.google.common.base.Function; -import org.apache.jackrabbit.oak.json.BlobSerializer; -import org.apache.jackrabbit.oak.json.JsonSerializer; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.plugins.segment.file.JournalReader; -import org.apache.jackrabbit.oak.spi.state.NodeState; - -/** - * Utility for tracing a node back through the revision history. - */ -@Deprecated -public class RevisionHistory { - private final ReadOnlyStore store; - - /** - * Create a new instance for a {@link FileStore} in the given {@code directory}. - * - * @param directory - * @throws IOException - */ - @Deprecated - public RevisionHistory(@Nonnull File directory) throws IOException, InvalidFileStoreVersionException { - this.store = FileStore.builder(checkNotNull(directory)).buildReadOnly(); - } - - private static NodeState getNode(SegmentNodeState root, String path) { - NodeState node = root; - for (String name : elements(path)) { - node = node.getChildNode(name); - } - return node; - } - - /** - * Return the history of the node at the given {@code path} according to the passed - * {@code journal}. 
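(Editor's sketch, not part of the patch: hypothetical usage of the RevisionHistory utility removed below; the store directory, journal path and node path are assumptions.)

    import java.io.File;
    import org.apache.jackrabbit.oak.plugins.segment.file.tooling.RevisionHistory;

    final class HistorySketch {
        public static void main(String[] args) throws Exception {
            RevisionHistory history = new RevisionHistory(new File("segmentstore"));
            // Walk the node at /content back through the journal's revisions.
            for (RevisionHistory.HistoryElement element :
                    history.getHistory(new File("segmentstore/journal.log"), "/content")) {
                System.out.println(element.toString(2)); // serialize to depth 2
            }
        }
    }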
- * - * @param journal - * @param path - * @return - * @throws IOException - */ - @Deprecated - public Iterable getHistory(@Nonnull File journal, @Nonnull final String path) - throws IOException { - checkNotNull(path); - return transform(new JournalReader(checkNotNull(journal)), - new Function() { - @Nullable @Override - public HistoryElement apply(String revision) { - store.setRevision(revision); - NodeState node = getNode(store.getHead(), path); - return new HistoryElement(revision, node); - } - }); - } - - /** - * Representation of a point in time for a given node. - */ - @Deprecated - public static final class HistoryElement { - private final String revision; - private final NodeState node; - - HistoryElement(String revision, NodeState node) { - this.revision = revision; - this.node = node; - } - - /** - * Revision of the node - * @return - */ - @Nonnull - @Deprecated - public String getRevision() { - return revision; - } - - /** - * Node at given revision - * @return - */ - @CheckForNull - @Deprecated - public NodeState getNode() { - return node; - } - - /** - * Serialise this element to JSON up to the given {@code depth}. - * @param depth - * @return - */ - @Deprecated - public String toString(int depth) { - JsonSerializer json = new JsonSerializer(depth, 0, Integer.MAX_VALUE, - DEFAULT_FILTER_EXPRESSION, new BlobSerializer()); - json.serialize(node); - return revision + "=" + json; - } - - /** - * @return {@code toString(0)} - */ - @Override - @Deprecated - public String toString() { - return toString(0); - } - - @Override - @Deprecated - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other == null || getClass() != other.getClass()) { - return false; - } - - HistoryElement that = (HistoryElement) other; - return revision.equals(that.revision) && - (node == null ? that.node == null : node.equals(that.node)); - - } - - @Override - @Deprecated - public int hashCode() { - return 31 * revision.hashCode() + - (node != null ? node.hashCode() : 0); - } - } -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/http/HttpStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/http/HttpStore.java deleted file mode 100644 index 2d0dfa1..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/http/HttpStore.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.http; - -import static com.google.common.base.Charsets.UTF_8; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.URLConnection; -import java.nio.ByteBuffer; - -import javax.annotation.CheckForNull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTracker; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; - -import com.google.common.io.ByteStreams; - -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -@Deprecated -public class HttpStore implements SegmentStore { - - private final SegmentTracker tracker = new SegmentTracker(this); - - private final URL base; - - /** - * @param base - * make sure the url ends with a slash "/", otherwise the - * requests will end up as absolute instead of relative - */ - @Deprecated - public HttpStore(URL base) { - this.base = base; - } - - @Override - @Deprecated - public SegmentTracker getTracker() { - return tracker; - } - - /** - * Builds a simple URLConnection. This method can be extended to add - * authorization headers if needed. - * - */ - protected URLConnection get(String fragment) throws MalformedURLException, - IOException { - final URL url; - if (fragment == null) { - url = base; - } else { - url = new URL(base, fragment); - } - return url.openConnection(); - } - - @Override - @Deprecated - public SegmentNodeState getHead() { - try { - URLConnection connection = get(null); - InputStream stream = connection.getInputStream(); - try { - BufferedReader reader = new BufferedReader( - new InputStreamReader(stream, UTF_8)); - return new SegmentNodeState( - RecordId.fromString(tracker, reader.readLine())); - } finally { - stream.close(); - } - } catch (IllegalArgumentException e) { - throw new IllegalStateException(e); - } catch (MalformedURLException e) { - throw new IllegalStateException(e); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - @Deprecated - public boolean setHead(SegmentNodeState base, SegmentNodeState head) { - // TODO throw new UnsupportedOperationException(); - return true; - } - - @Override - @Deprecated - public boolean containsSegment(SegmentId id) { - return id.getTracker() == tracker || readSegment(id) != null; - } - - @Override - @Deprecated - public Segment readSegment(SegmentId id) { - try { - URLConnection connection = get(id.toString()); - InputStream stream = connection.getInputStream(); - try { - byte[] data = ByteStreams.toByteArray(stream); - return new Segment(tracker, id, ByteBuffer.wrap(data)); - } finally { - stream.close(); - } - } catch (MalformedURLException e) { - throw new SegmentNotFoundException(id, e); - } catch (IOException e) { - throw new SegmentNotFoundException(id, e); - } - } - - @Override - @Deprecated - public void writeSegment( - SegmentId id, byte[] bytes, int offset, int length) throws IOException { - try { - URLConnection connection = get(id.toString()); - connection.setDoInput(false); - connection.setDoOutput(true); - OutputStream stream 
= connection.getOutputStream(); - try { - stream.write(bytes, offset, length); - } finally { - stream.close(); - } - } catch (MalformedURLException e) { - throw new IOException(e); - } - } - - @Override - @Deprecated - public void close() { - } - - @Override @CheckForNull - @Deprecated - public Blob readBlob(String reference) { - return null; - } - - @Override @CheckForNull - @Deprecated - public BlobStore getBlobStore() { - return null; - } - - @Override - @Deprecated - public void gc() { - // TODO: distributed gc - } - -} diff --git oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/memory/MemoryStore.java oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/memory/MemoryStore.java deleted file mode 100644 index 3633fcf..0000000 --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/memory/MemoryStore.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.memory; - -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.concurrent.ConcurrentMap; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTracker; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentVersion; -import org.apache.jackrabbit.oak.plugins.segment.SegmentWriter; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; - -import com.google.common.collect.Maps; - -/** - * A store used for in-memory operations. 
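(Editor's sketch on the HttpStore removed above, not part of the patch: the base URL must end with a slash because java.net.URL resolves relative references against the last path segment, as the constructor's Javadoc warns. Host, port and path are assumptions.)

    import java.net.URL;
    import org.apache.jackrabbit.oak.plugins.segment.http.HttpStore;

    final class HttpStoreUsageSketch {
        public static void main(String[] args) throws Exception {
            // "http://localhost:8080/segments" (no trailing slash) would resolve
            // segment ids against "/" and drop the "segments" path segment.
            HttpStore store = new HttpStore(new URL("http://localhost:8080/segments/"));
            System.out.println(store.getHead());
        }
    }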
- */ -@Deprecated -public class MemoryStore implements SegmentStore { - - private final SegmentTracker tracker = new SegmentTracker(this, 16, SegmentVersion.V_11); - - private SegmentNodeState head; - - private final ConcurrentMap segments = - Maps.newConcurrentMap(); - - @Deprecated - public MemoryStore(NodeState root) throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("root", root); - - SegmentWriter writer = tracker.getWriter(); - this.head = writer.writeNode(builder.getNodeState()); - writer.flush(); - } - - @Deprecated - public MemoryStore() throws IOException { - this(EMPTY_NODE); - } - - @Override - @Deprecated - public SegmentTracker getTracker() { - return tracker; - } - - @Override - @Deprecated - public synchronized SegmentNodeState getHead() { - return head; - } - - @Override - @Deprecated - public synchronized boolean setHead(SegmentNodeState base, SegmentNodeState head) { - if (this.head.getRecordId().equals(base.getRecordId())) { - this.head = head; - return true; - } else { - return false; - } - } - - @Override - @Deprecated - public boolean containsSegment(SegmentId id) { - return id.getTracker() == tracker || segments.containsKey(id); - } - - @Override @Nonnull - @Deprecated - public Segment readSegment(SegmentId id) { - Segment segment = segments.get(id); - if (segment != null) { - return segment; - } - throw new SegmentNotFoundException(id); - } - - @Override - @Deprecated - public void writeSegment( - SegmentId id, byte[] data, int offset, int length) throws IOException { - ByteBuffer buffer = ByteBuffer.allocate(length); - buffer.put(data, offset, length); - buffer.rewind(); - Segment segment = new Segment(tracker, id, buffer); - if (segments.putIfAbsent(id, segment) != null) { - throw new IOException("Segment override: " + id); - } - } - - @Override - @Deprecated - public void close() { - } - - @Override - @Deprecated - public Blob readBlob(String reference) { - return null; - } - - @Override - @Deprecated - public BlobStore getBlobStore() { - return null; - } - - @Override - @Deprecated - public void gc() { - System.gc(); - segments.keySet().retainAll(tracker.getReferencedSegmentIds()); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupTest.java deleted file mode 100644 index 9b21fbd..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/backup/FileStoreBackupTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.backup; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore.builder; -import static org.junit.Assert.assertEquals; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.Random; - -import org.apache.jackrabbit.oak.Oak; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.security.OpenSecurityProvider; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class FileStoreBackupTest { - - private File src; - private File destination; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Before - public void before() throws Exception { - src = folder.newFolder("src"); - destination = folder.newFolder("dst"); - } - - @Test - public void testBackup() throws Exception { - FileStore source = FileStore.builder(src).withMaxFileSize(8).build(); - - NodeStore store = builder(source).build(); - init(store); - - // initial content - FileStoreBackup.backup(store, destination); - - compare(source, destination); - - addTestContent(store); - FileStoreBackup.backup(store, destination); - compare(source, destination); - - source.close(); - } - - @Test - public void testRestore() throws Exception { - FileStore source = FileStore.builder(src).withMaxFileSize(8).build(); - - NodeStore store = builder(source).build(); - init(store); - FileStoreBackup.backup(store, destination); - addTestContent(store); - source.close(); - - FileStoreRestore.restore(destination, src); - - source = FileStore.builder(src).withMaxFileSize(8).build(); - compare(source, destination); - source.close(); - } - - private static void addTestContent(NodeStore store) - throws CommitFailedException, IOException { - NodeBuilder builder = store.getRoot().builder(); - NodeBuilder c = builder.child("test-backup").child("binaries"); - for (int i = 0; i < 2; i++) { - c.setProperty("bin" + i, createBlob(store, 64 * 1024)); - } - builder.child("root"); // make sure we don't backup the super-root - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - - private static void compare(FileStore store, File destination) throws IOException, InvalidFileStoreVersionException { - FileStore backup = FileStore.builder(destination).withMaxFileSize(8).build(); - assertEquals(store.getHead(), backup.getHead()); - backup.close(); - } - - private static void init(NodeStore store) { - new Oak(store).with(new OpenSecurityProvider()) - .with(new InitialContent()).createContentRepository(); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java 
oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java deleted file mode 100644 index 604cd4c..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CancelableDiffTest.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import com.google.common.base.Suppliers; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; -import org.junit.Test; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; - -public class CancelableDiffTest { - - @Test - public void testPropertyAddedInterruptible() throws Throwable { - PropertyState after = mock(PropertyState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).propertyAdded(after); - - assertTrue(newCancelableDiff(wrapped, false).propertyAdded(after)); - assertFalse(newCancelableDiff(wrapped, true).propertyAdded(after)); - } - - @Test - public void testPropertyChangedInterruptible() throws Throwable { - PropertyState before = mock(PropertyState.class); - PropertyState after = mock(PropertyState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).propertyChanged(before, after); - - assertTrue(newCancelableDiff(wrapped, false).propertyChanged(before, after)); - assertFalse(newCancelableDiff(wrapped, true).propertyChanged(before, after)); - } - - @Test - public void testPropertyDeletedInterruptible() throws Throwable { - PropertyState before = mock(PropertyState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).propertyDeleted(before); - - assertTrue(newCancelableDiff(wrapped, false).propertyDeleted(before)); - assertFalse(newCancelableDiff(wrapped, true).propertyDeleted(before)); - } - - @Test - public void testChildNodeAddedInterruptible() throws Throwable { - NodeState after = mock(NodeState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).childNodeAdded("name", after); - - assertTrue(newCancelableDiff(wrapped, false).childNodeAdded("name", after)); - assertFalse(newCancelableDiff(wrapped, true).childNodeAdded("name", after)); - } - - @Test - public void testChildNodeChangedInterruptible() throws Throwable { - NodeState before = mock(NodeState.class); - NodeState after = mock(NodeState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).childNodeChanged("name", before, after); - - 
assertTrue(newCancelableDiff(wrapped, false).childNodeChanged("name", before, after)); - assertFalse(newCancelableDiff(wrapped, true).childNodeChanged("name", before, after)); - } - - @Test - public void testChildNodeDeletedInterruptible() throws Throwable { - NodeState before = mock(NodeState.class); - - NodeStateDiff wrapped = mock(NodeStateDiff.class); - doReturn(true).when(wrapped).childNodeDeleted("name", before); - - assertTrue(newCancelableDiff(wrapped, false).childNodeDeleted("name", before)); - assertFalse(newCancelableDiff(wrapped, true).childNodeDeleted("name", before)); - } - - private NodeStateDiff newCancelableDiff(NodeStateDiff wrapped, boolean cancel) { - return new CancelableDiff(wrapped, Suppliers.ofInstance(cancel)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CheckpointTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CheckpointTest.java deleted file mode 100644 index 3b58d51..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CheckpointTest.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.concurrent.Callable; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Test; - -public class CheckpointTest { - - @Test - public void testCheckpoint() throws CommitFailedException, IOException { - SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - addTestNode(store, "test-checkpoint"); - verifyNS(store, true); - rmTestNode(store, "test-checkpoint"); - verifyNS(store, false); - - // gc? 
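The CancelableDiffTest deleted above pinned down a simple contract: every NodeStateDiff callback short-circuits to false, aborting the comparison, as soon as a cancel signal flips to true. A self-contained sketch of that pattern (an illustrative stand-in, not the internal CancelableDiff class):

    import com.google.common.base.Supplier;

    import org.apache.jackrabbit.oak.api.PropertyState;
    import org.apache.jackrabbit.oak.spi.state.NodeState;
    import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;

    // Wraps a NodeStateDiff and vetoes every callback once cancel.get() is true.
    public class CancelableDiffSketch implements NodeStateDiff {

        private final NodeStateDiff delegate;
        private final Supplier<Boolean> cancel;

        public CancelableDiffSketch(NodeStateDiff delegate, Supplier<Boolean> cancel) {
            this.delegate = delegate;
            this.cancel = cancel;
        }

        @Override
        public boolean propertyAdded(PropertyState after) {
            return !cancel.get() && delegate.propertyAdded(after);
        }

        @Override
        public boolean propertyChanged(PropertyState before, PropertyState after) {
            return !cancel.get() && delegate.propertyChanged(before, after);
        }

        @Override
        public boolean propertyDeleted(PropertyState before) {
            return !cancel.get() && delegate.propertyDeleted(before);
        }

        @Override
        public boolean childNodeAdded(String name, NodeState after) {
            return !cancel.get() && delegate.childNodeAdded(name, after);
        }

        @Override
        public boolean childNodeChanged(String name, NodeState before, NodeState after) {
            return !cancel.get() && delegate.childNodeChanged(name, before, after);
        }

        @Override
        public boolean childNodeDeleted(String name, NodeState before) {
            return !cancel.get() && delegate.childNodeDeleted(name, before);
        }
    }

With Suppliers.ofInstance(true) every callback returns false immediately, which is exactly the behavior the deleted test asserted for each of the six callbacks.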
- store.retrieve("missing-checkpoint"); - } - - @Test - public void testRelease() throws CommitFailedException, IOException { - SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - addTestNode(store, "test-checkpoint"); - String cp = verifyNS(store, true); - - store.release(cp); - assertNull(store.retrieve(cp)); - - } - - private static String verifyNS(SegmentNodeStore store, boolean exists) { - String cp = store.checkpoint(TimeUnit.HOURS.toMillis(1)); - assertNotNull("Checkpoint must not be null", cp); - - NodeState cpns = store.retrieve(cp); - assertNotNull(cpns); - if (exists) { - assertTrue("Node doesn't exist in checkpoint", - cpns.getChildNode("test-checkpoint").exists()); - } else { - assertFalse("Node shouldn't exist in checkpoint", cpns - .getChildNode("test-checkpoint").exists()); - } - return cp; - } - - private static void addTestNode(NodeStore store, String name) - throws CommitFailedException { - NodeBuilder builder = store.getRoot().builder(); - builder.child(name); - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - private static void rmTestNode(NodeStore store, String name) - throws CommitFailedException { - NodeBuilder builder = store.getRoot().builder(); - builder.child(name).remove(); - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - /** - * OAK-3587 test simulates a timeout while trying to create a checkpoint, - * then releases the lock and tries again - */ - @Test - public void testShortWait() throws Exception { - final SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - store.setCheckpointsLockWaitTime(1); - - final Semaphore semaphore = new Semaphore(0); - final AtomicBoolean blocking = new AtomicBoolean(true); - - final Callable block = new Callable() { - - @Override - public Boolean call() { - while (blocking.get()) { - if (semaphore.availablePermits() == 0) { - semaphore.release(); - } - } - return true; - } - }; - - Thread background = new Thread() { - @Override - public void run() { - try { - store.locked(block); - } catch (Exception e) { - // - } - } - }; - - background.start(); - semaphore.acquire(); - - String cp0 = store.checkpoint(10); - assertNull(store.retrieve(cp0)); - - blocking.set(false); - String cp1 = store.checkpoint(10); - assertNotNull(store.retrieve(cp1)); - } - - /** - * OAK-3587 test simulates a wait less than configured - * {@code SegmentNodeStore#setCheckpointsLockWaitTime(int)} value so the - * checkpoint call must return a valid value - */ - @Test - public void testLongWait() throws Exception { - final int blockTime = 1; - final SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - store.setCheckpointsLockWaitTime(blockTime + 1); - - final Semaphore semaphore = new Semaphore(0); - - final Callable block = new Callable() { - - @Override - public Boolean call() { - try { - semaphore.release(); - TimeUnit.SECONDS.sleep(blockTime); - } catch (InterruptedException e) { - // - } - return true; - } - }; - - Thread background = new Thread() { - @Override - public void run() { - try { - store.locked(block); - } catch (Exception e) { - // - } - } - }; - - background.start(); - semaphore.acquire(); - - String cp0 = store.checkpoint(10); - assertNotNull(store.retrieve(cp0)); - } - - @Test - public void testCheckpointMax() throws CommitFailedException, IOException { - SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - String cp0 = store.checkpoint(Long.MAX_VALUE); - String cp1 = 
store.checkpoint(Long.MAX_VALUE); - assertNotNull(store.retrieve(cp0)); - assertNotNull(store.retrieve(cp1)); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionAndCleanupIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionAndCleanupIT.java deleted file mode 100644 index 59d2166..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionAndCleanupIT.java +++ /dev/null @@ -1,719 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Lists.newArrayList; -import static java.lang.Integer.getInteger; -import static java.util.concurrent.Executors.newFixedThreadPool; -import static java.util.concurrent.TimeUnit.MINUTES; -import static org.apache.commons.io.FileUtils.byteCountToDisplaySize; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore.builder; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_ALL; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_NONE; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_OLD; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import javax.annotation.Nonnull; - -import com.google.common.io.ByteStreams; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; 
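For reference, the checkpoint semantics exercised by the CheckpointTest deleted above belong to the store-independent NodeStore API and survive the module removal unchanged. A compact sketch against an oak-segment-tar in-memory store (the test class name is illustrative):

    import static org.junit.Assert.assertNotNull;
    import static org.junit.Assert.assertNull;

    import java.util.concurrent.TimeUnit;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;
    import org.junit.Test;

    public class CheckpointSketchTest {

        @Test
        public void checkpointLifecycle() throws Exception {
            NodeStore store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();

            // Create a checkpoint valid for one hour; the handle is never null.
            String cp = store.checkpoint(TimeUnit.HOURS.toMillis(1));
            assertNotNull(cp);

            // A live checkpoint resolves to the root state it captured.
            assertNotNull(store.retrieve(cp));

            // After release it can no longer be resolved ...
            store.release(cp);
            assertNull(store.retrieve(cp));

            // ... and unknown handles resolve to null instead of throwing.
            assertNull(store.retrieve("missing-checkpoint"));
        }
    }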
-import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CompactionAndCleanupIT { - - private static final Logger log = LoggerFactory - .getLogger(CompactionAndCleanupIT.class); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - public static void assumptions() { - assumeTrue(getFixtures().contains(SEGMENT_MK)); - } - - @Test - public void compactionNoBinaryClone() throws Exception { - // 2MB data, 5MB blob - final int blobSize = 5 * 1024 * 1024; - final int dataNodes = 10000; - - // really long time span, no binary cloning - - FileStore fileStore = FileStore.builder(getFileStoreFolder()) - .withMaxFileSize(1) - .build(); - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - CompactionStrategy custom = new CompactionStrategy(false, false, - CLEAN_OLD, TimeUnit.HOURS.toMillis(1), (byte) 0) { - @Override - public boolean compacted(@Nonnull Callable setHead) - throws Exception { - return nodeStore.locked(setHead); - } - }; - // Use in memory compaction map as gains asserted later on - // do not take additional space of the compaction map into consideration - custom.setPersistCompactionMap(false); - fileStore.setCompactionStrategy(custom); - - // 1a. Create a bunch of data - NodeBuilder extra = nodeStore.getRoot().builder(); - NodeBuilder content = extra.child("content"); - for (int i = 0; i < dataNodes; i++) { - NodeBuilder c = content.child("c" + i); - for (int j = 0; j < 1000; j++) { - c.setProperty("p" + i, "v" + i); - } - } - nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY); - // ---- - - final long dataSize = fileStore.size(); - log.debug("File store dataSize {}", byteCountToDisplaySize(dataSize)); - - try { - // 1. Create a property with 5 MB blob - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setProperty("a1", createBlob(nodeStore, blobSize)); - builder.setProperty("b", "foo"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - log.debug("File store pre removal {}, expecting {}", - byteCountToDisplaySize(fileStore.size()), - byteCountToDisplaySize(blobSize + dataSize)); - assertEquals(mb(blobSize + dataSize), mb(fileStore.size())); - - // 2. Now remove the property - builder = nodeStore.getRoot().builder(); - builder.removeProperty("a1"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // Size remains same, no cleanup happened yet - log.debug("File store pre compaction {}, expecting {}", - byteCountToDisplaySize(fileStore.size()), - byteCountToDisplaySize(blobSize + dataSize)); - assertEquals(mb(blobSize + dataSize), mb(fileStore.size())); - - // 3. 
Compact - assertTrue(fileStore.maybeCompact(false)); - - // Size doesn't shrink: ran compaction with a '1 Hour' cleanup - // strategy - assertSize("post compaction", fileStore.size(), - blobSize + dataSize, blobSize + 2 * dataSize); - - // 4. Add some more property to flush the current TarWriter - builder = nodeStore.getRoot().builder(); - builder.setProperty("a2", createBlob(nodeStore, blobSize)); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // Size is double - assertSize("pre cleanup", fileStore.size(), 2 * blobSize - + dataSize, 2 * blobSize + 2 * dataSize); - - // 5. Cleanup, expecting store size: - // no data content => - // fileStore.size() == blobSize - // some data content => - // fileStore.size() in [blobSize + dataSize, blobSize + 2 x dataSize] - assertTrue(fileStore.maybeCompact(false)); - fileStore.cleanup(); - assertSize("post cleanup", fileStore.size(), 0, blobSize + 2 * dataSize); - - // refresh the ts ref, to simulate a long wait time - custom.setOlderThan(0); - TimeUnit.MILLISECONDS.sleep(5); - - boolean needsCompaction = true; - for (int i = 0; i < 3 && needsCompaction; i++) { - needsCompaction = fileStore.maybeCompact(false); - fileStore.cleanup(); - } - - // gain is finally 0% - assertFalse(fileStore.maybeCompact(false)); - - // no data loss happened - byte[] blob = ByteStreams.toByteArray(nodeStore.getRoot() - .getProperty("a2").getValue(Type.BINARY).getNewStream()); - assertEquals(blobSize, blob.length); - } finally { - fileStore.close(); - } - } - - @Test - public void noCleanupOnCompactionMap() throws Exception { - // 2MB data, 5MB blob - final int blobSize = 5 * 1024 * 1024; - final int dataNodes = 10000; - - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).build(); - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - CompactionStrategy custom = new CompactionStrategy(false, false, - CLEAN_OLD, TimeUnit.HOURS.toMillis(1), (byte) 0) { - @Override - public boolean compacted(@Nonnull Callable setHead) - throws Exception { - return nodeStore.locked(setHead); - } - }; - fileStore.setCompactionStrategy(custom); - - // 1a. Create a bunch of data - NodeBuilder extra = nodeStore.getRoot().builder(); - NodeBuilder content = extra.child("content"); - for (int i = 0; i < dataNodes; i++) { - NodeBuilder c = content.child("c" + i); - for (int j = 0; j < 1000; j++) { - c.setProperty("p" + i, "v" + i); - } - } - nodeStore.merge(extra, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - final long dataSize = fileStore.size(); - log.debug("File store dataSize {}", byteCountToDisplaySize(dataSize)); - - try { - // 1. Create a property with 5 MB blob - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setProperty("a1", createBlob(nodeStore, blobSize)); - builder.setProperty("b", "foo"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // 2. Now remove the property - builder = nodeStore.getRoot().builder(); - builder.removeProperty("a1"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // 3. Compact - fileStore.maybeCompact(false); - - // 4. 
Add some more properties to flush the current TarWriter - builder = nodeStore.getRoot().builder(); - builder.setProperty("a2", createBlob(nodeStore, blobSize)); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // There should be no SNFE when running cleanup as compaction map segments - // should be pinned and thus not collected - fileStore.maybeCompact(false); - fileStore.cleanup(); - - // refresh the ts ref, to simulate a long wait time - custom.setOlderThan(0); - TimeUnit.MILLISECONDS.sleep(5); - - boolean needsCompaction = true; - for (int i = 0; i < 3 && needsCompaction; i++) { - needsCompaction = fileStore.maybeCompact(false); - fileStore.cleanup(); - } - } finally { - fileStore.close(); - } - } - - private static void assertSize(String info, long size, long lower, - long upper) { - log.debug("File Store {} size {}, expected in interval [{},{}]", info, - byteCountToDisplaySize(size), byteCountToDisplaySize(lower), - byteCountToDisplaySize(upper)); - assertTrue("File Store " + info + " size expected in interval [" - + mb(lower) + "," + mb(upper) + "] but was: " + mb(size), - mb(size) >= mb(lower) && mb(size) <= mb(upper)); - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - - private static long mb(long size){ - return size / (1024 * 1024); - } - - /** - * Regression test for OAK-2192 testing for mixed segments. This test does not - * cover OAK-3348. I.e. it does not assert the segment graph is free of cross - * gc generation references. - */ - @Test - public void testMixedSegments() throws Exception { - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(true).build(); - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build(); - final AtomicBoolean compactionSuccess = new AtomicBoolean(true); - CompactionStrategy strategy = new CompactionStrategy(true, false, CLEAN_NONE, 0, (byte) 5) { - @Override - public boolean compacted(Callable<Boolean> setHead) throws Exception { - compactionSuccess.set(nodeStore.locked(setHead, 1, MINUTES)); - return compactionSuccess.get(); - } - }; - strategy.setForceAfterFail(true); - store.setCompactionStrategy(strategy); - - NodeBuilder root = nodeStore.getRoot().builder(); - createNodes(root.setChildNode("test"), 10, 3); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - final Set<UUID> beforeSegments = new HashSet<UUID>(); - collectSegments(store.getHead(), beforeSegments); - - final AtomicReference<Boolean> run = new AtomicReference<Boolean>(true); - final List<String> failedCommits = newArrayList(); - Thread[] threads = new Thread[10]; - for (int k = 0; k < threads.length; k++) { - final int threadId = k; - threads[k] = new Thread(new Runnable() { - @Override - public void run() { - for (int j = 0; run.get(); j++) { - String nodeName = "b-" + threadId + "," + j; - try { - NodeBuilder root = nodeStore.getRoot().builder(); - root.setChildNode(nodeName); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - Thread.sleep(5); - } catch (CommitFailedException e) { - failedCommits.add(nodeName); - } catch (InterruptedException e) { - Thread.interrupted(); - break; - } - } - } - }); - threads[k].start(); - } - store.compact(); - run.set(false); - for (Thread t : threads) { - t.join(); - } - store.flush(); - - assumeTrue("Failed to acquire compaction lock", compactionSuccess.get()); - assertTrue("Failed commits: " + failedCommits, 
failedCommits.isEmpty()); - - Set afterSegments = new HashSet(); - collectSegments(store.getHead(), afterSegments); - try { - for (UUID u : beforeSegments) { - assertFalse("Mixed segments found: " + u, afterSegments.contains(u)); - } - } finally { - store.close(); - } - } - - /** - * Set a root node referring to a child node that lives in a different segments. Depending - * on the order how the SegmentBufferWriters associated with the threads used to create the - * nodes are flushed, this will introduce a forward reference between the segments. - * The current cleanup mechanism cannot handle forward references and removes the referenced - * segment causing a SNFE. - * This is a regression introduced with OAK-1828. - */ - @Test - public void cleanupCyclicGraph() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).build(); - final SegmentWriter writer = fileStore.getTracker().getWriter(); - final SegmentNodeState oldHead = fileStore.getHead(); - - final SegmentNodeState child = run(new Callable() { - @Override - public SegmentNodeState call() throws Exception { - NodeBuilder builder = EMPTY_NODE.builder(); - return writer.writeNode(EMPTY_NODE); - } - }); - SegmentNodeState newHead = run(new Callable() { - @Override - public SegmentNodeState call() throws Exception { - NodeBuilder builder = oldHead.builder(); - builder.setChildNode("child", child); - return writer.writeNode(builder.getNodeState()); - } - }); - - writer.flush(); - fileStore.setHead(oldHead, newHead); - fileStore.close(); - - fileStore = FileStore.builder(getFileStoreFolder()).build(); - - traverse(fileStore.getHead()); - fileStore.cleanup(); - - // Traversal after cleanup might result in an SNFE - traverse(fileStore.getHead()); - - fileStore.close(); - } - - private static void traverse(NodeState node) { - for (ChildNodeEntry childNodeEntry : node.getChildNodeEntries()) { - traverse(childNodeEntry.getNodeState()); - } - } - - private static T run(Callable callable) throws InterruptedException, ExecutionException { - FutureTask task = new FutureTask(callable); - new Thread(task).start(); - return task.get(); - } - - /** - * Test asserting OAK-3348: Cross gc sessions might introduce references to pre-compacted segments - */ - @Test - @Ignore("OAK-3348") // FIXME OAK-3348 - public void preCompactionReferences() throws Exception { - for (String ref : new String[] {"merge-before-compact", "merge-after-compact"}) { - File repoDir = new File(getFileStoreFolder(), ref); - FileStore fileStore = FileStore.builder(repoDir).withMaxFileSize(2).build(); - final SegmentNodeStore nodeStore = builder(fileStore).build(); - fileStore.setCompactionStrategy(new CompactionStrategy(true, false, CLEAN_NONE, 0, (byte) 5) { - @Override - public boolean compacted(Callable setHead) throws Exception { - return nodeStore.locked(setHead); - } - }); - - try { - // add some content - NodeBuilder preGCBuilder = nodeStore.getRoot().builder(); - preGCBuilder.setChildNode("test").setProperty("blob", createBlob(nodeStore, 1024 * 1024)); - nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // remove it again so we have something to gc - preGCBuilder = nodeStore.getRoot().builder(); - preGCBuilder.getChildNode("test").remove(); - nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // with a new builder simulate exceeding the update limit. 
- // This will cause changes to be pre-written to segments - preGCBuilder = nodeStore.getRoot().builder(); - preGCBuilder.setChildNode("test").setChildNode("a").setChildNode("b").setProperty("foo", "bar"); - for (int k = 0; k < getInteger("update.limit", 10000); k += 2) { - preGCBuilder.setChildNode("dummy").remove(); - } - - // case 1: merge above changes before compact - if ("merge-before-compact".equals(ref)) { - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setChildNode("n"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - fileStore.compact(); - - // case 2: merge above changes after compact - if ("merge-after-compact".equals(ref)) { - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setChildNode("n"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - nodeStore.merge(preGCBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - } finally { - fileStore.close(); - } - - // Re-initialise the file store to simulate off-line gc - fileStore = FileStore.builder(repoDir).withMaxFileSize(2).build(); - try { - // The 1M blob should get gc-ed. This works for case 1. - // However it doesn't for case 2 as merging after compaction - // apparently creates references from the current segment - // to the pre-compacted segment to which above changes have - // been pre-written. - fileStore.cleanup(); - assertTrue(ref + " repository size " + fileStore.size() + " < " + 1024 * 1024, - fileStore.size() < 1024 * 1024); - } finally { - fileStore.close(); - } - } - } - - private static void collectSegments(SegmentNodeState s, final Set segmentIds) { - new SegmentParser() { - @Override - protected void onNode(RecordId parentId, RecordId nodeId) { - super.onNode(parentId, nodeId); - segmentIds.add(nodeId.asUUID()); - } - - @Override - protected void onTemplate(RecordId parentId, RecordId templateId) { - super.onTemplate(parentId, templateId); - segmentIds.add(templateId.asUUID()); - } - - @Override - protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMap(parentId, mapId, map); - segmentIds.add(mapId.asUUID()); - } - - @Override - protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapDiff(parentId, mapId, map); - segmentIds.add(mapId.asUUID()); - } - - @Override - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapLeaf(parentId, mapId, map); - segmentIds.add(mapId.asUUID()); - } - - @Override - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - super.onMapBranch(parentId, mapId, map); - segmentIds.add(mapId.asUUID()); - } - - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - super.onProperty(parentId, propertyId, template); - segmentIds.add(propertyId.asUUID()); - } - - @Override - protected void onValue(RecordId parentId, RecordId valueId, Type type) { - super.onValue(parentId, valueId, type); - segmentIds.add(valueId.asUUID()); - } - - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - super.onBlob(parentId, blobId); - segmentIds.add(blobId.asUUID()); - } - - @Override - protected void onString(RecordId parentId, RecordId stringId) { - super.onString(parentId, stringId); - segmentIds.add(stringId.asUUID()); - } - - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - super.onList(parentId, listId, count); - 
segmentIds.add(listId.asUUID()); - } - - @Override - protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) { - super.onListBucket(parentId, listId, index, count, capacity); - segmentIds.add(listId.asUUID()); - } - }.parseNode(s.getRecordId()); - } - - private static void createNodes(NodeBuilder builder, int count, int depth) { - if (depth > 0) { - for (int k = 0; k < count; k++) { - NodeBuilder child = builder.setChildNode("node" + k); - createProperties(child, count); - createNodes(child, count, depth - 1); - } - } - } - - private static void createProperties(NodeBuilder builder, int count) { - for (int k = 0; k < count; k++) { - builder.setProperty("property-" + UUID.randomUUID().toString(), "value-" + UUID.randomUUID().toString()); - } - } - - @Test - public void propertyRetention() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).build(); - try { - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - CompactionStrategy strategy = new CompactionStrategy(false, false, CLEAN_ALL, 0, (byte) 0) { - @Override - public boolean compacted(@Nonnull Callable setHead) - throws Exception { - return nodeStore.locked(setHead); - } - }; - // CLEAN_ALL and persisted compaction map results in SNFE in compaction map segments - strategy.setPersistCompactionMap(false); - fileStore.setCompactionStrategy(strategy); - - // Add a property - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setChildNode("test").setProperty("property", "value"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // Segment id of the current segment - NodeState test = nodeStore.getRoot().getChildNode("test"); - SegmentId id = ((SegmentNodeState) test).getRecordId().getSegmentId(); - assertTrue(fileStore.containsSegment(id)); - - // Add enough content to fill up the current tar file - builder = nodeStore.getRoot().builder(); - addContent(builder.setChildNode("dump")); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // Segment and property still there - assertTrue(fileStore.containsSegment(id)); - PropertyState property = test.getProperty("property"); - assertEquals("value", property.getValue(STRING)); - - // GC should remove the segment - fileStore.flush(); - fileStore.compact(); - fileStore.cleanup(); - - try { - fileStore.readSegment(id); - fail("Segment " + id + "should be gc'ed"); - } catch (SegmentNotFoundException ignore) {} - } finally { - fileStore.close(); - } - } - - @Test - public void randomAccessFileConcurrentReadAndLength() throws Exception { - final FileStore fileStore = FileStore.builder(getFileStoreFolder()) - .withMaxFileSize(1) - .withMemoryMapping(false) - .build(); - - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - - ExecutorService executorService = newFixedThreadPool(300); - final AtomicInteger counter = new AtomicInteger(); - final ReferenceCollector dummyCollector = new ReferenceCollector() { - - @Override - public void addReference(String reference, String nodeId) { - // do nothing - } - }; - - try { - Callable concurrentWriteTask = new Callable() { - @Override - public Void call() throws Exception { - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.setProperty("blob-" + counter.getAndIncrement(), createBlob(nodeStore, 25 * 25)); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - fileStore.flush(); - return null; - } - }; - - Callable concurrentCleanupTask = 
new Callable() { - @Override - public Void call() throws Exception { - fileStore.cleanup(); - return null; - } - }; - - Callable concurrentReferenceCollector = new Callable() { - @Override - public Void call() throws Exception { - fileStore.getTracker().collectBlobReferences(dummyCollector); - return null; - } - }; - - List> results = newArrayList(); - results.add(executorService.submit(concurrentCleanupTask)); - - for (int i = 0; i < 100; i++) { - results.add(executorService.submit(concurrentWriteTask)); - results.add(executorService.submit(concurrentReferenceCollector)); - } - - for (Future result : results) { - assertNull(result.get()); - } - - } finally { - new ExecutorCloser(executorService).close(); - fileStore.close(); - } - } - - private static void addContent(NodeBuilder builder) { - for (int k = 0; k < 10000; k++) { - builder.setProperty(UUID.randomUUID().toString(), UUID.randomUUID().toString()); - } - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMapTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMapTest.java deleted file mode 100644 index 21ba88b..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactionMapTest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
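None of the CompactionStrategy knobs used above carry over to oak-segment-tar, which drives compaction and cleanup through SegmentGCOptions instead. A hedged sketch of the rough equivalent (method names taken from the oak-segment-tar API contemporary with this change; treat them as assumptions, and note there is no one-to-one mapping for CleanupType or the compaction lock):

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.compaction.SegmentGCOptions;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

    public final class GcConfigSketch {

        public static void main(String[] args) throws Exception {
            SegmentGCOptions gcOptions = SegmentGCOptions.defaultGCOptions()
                    .setRetainedGenerations(2); // generations kept by cleanup

            FileStore fileStore = FileStoreBuilder
                    .fileStoreBuilder(new File("target", "segmentstore"))
                    .withMaxFileSize(1)
                    .withGCOptions(gcOptions)
                    .build();
            try {
                SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
                // ... create and delete content through nodeStore ...

                // Estimation, compaction and cleanup in a single call
                // (exact trigger method assumed from the API of that era).
                fileStore.gc();
            } finally {
                fileStore.close();
            }
        }
    }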
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Iterables.get; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.singleton; -import static org.apache.jackrabbit.oak.plugins.segment.CompactionMap.sum; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.newRecordId; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.randomRecordIdMap; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -import java.io.File; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.UUID; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class CompactionMapTest { - - private final Random rnd = new Random(); - - private final boolean usePersistedMap; - - private FileStore store; - - private Map referenceMap1; - private Map referenceMap2; - private Map referenceMap3; - private Map referenceMap = newHashMap(); - - private PartialCompactionMap compactionMap1; - private PartialCompactionMap compactionMap2; - private PartialCompactionMap compactionMap3; - private CompactionMap compactionMap; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Parameterized.Parameters - public static List fixtures() { - return ImmutableList.of(new Boolean[] {true}, new Boolean[] {false}); - } - - @Before - public void setUp() throws Exception { - store = FileStore.builder(folder.getRoot()).build(); - - compactionMap1 = createCompactionMap(usePersistedMap); - referenceMap1 = randomRecordIdMap(rnd, store.getTracker(), 10, 10); - putAll(compactionMap1, referenceMap1); - referenceMap.putAll(referenceMap1); - - compactionMap2 = createCompactionMap(usePersistedMap); - referenceMap2 = randomRecordIdMap(rnd, store.getTracker(), 10, 10); - putAll(compactionMap2, referenceMap2); - referenceMap.putAll(referenceMap2); - - compactionMap3 = createCompactionMap(usePersistedMap); - referenceMap3 = randomRecordIdMap(rnd, store.getTracker(), 10, 10); - putAll(compactionMap3, referenceMap3); - referenceMap.putAll(referenceMap3); - - this.compactionMap = CompactionMap.EMPTY.cons(compactionMap3).cons(compactionMap2).cons(compactionMap1); - } - - @After - public void tearDown() { - store.close(); - } - - private PartialCompactionMap createCompactionMap(boolean persisted) { - if (persisted) { - return new PersistedCompactionMap(store.getTracker()); - } else { - return new InMemoryCompactionMap(store.getTracker()); - } - } - - public CompactionMapTest(boolean usePersistedMap) { - this.usePersistedMap = usePersistedMap; - } - - private static void putAll(PartialCompactionMap map1, Map recordIdRecordIdMap) { - for (Entry tuple : recordIdRecordIdMap.entrySet()) { - map1.put(tuple.getKey(), tuple.getValue()); - } - } - - @Test - public void checkExistingKeys() { - for (Entry reference : referenceMap.entrySet()) { - assertEquals(reference.getValue(), compactionMap.get((reference.getKey()))); - } - } - - @Test - public void checkNonExistingKeys() { - 
for (RecordId keys : randomRecordIdMap(rnd, store.getTracker(), 10, 10).keySet()) { - if (!referenceMap.containsKey(keys)) { - assertNull(compactionMap.get(keys)); - } - } - } - - @Test - public void removeSome() { - Set removedUUIDs = newHashSet(); - for (int k = 0; k < 1 + rnd.nextInt(referenceMap.size()); k++) { - RecordId key = get(referenceMap.keySet(), rnd.nextInt(referenceMap.size())); - removedUUIDs.add(key.asUUID()); - } - - compactionMap.remove(removedUUIDs); - - for (Entry reference : referenceMap.entrySet()) { - RecordId key = reference.getKey(); - if (removedUUIDs.contains(key.asUUID())) { - assertNull(compactionMap.get(key)); - } else { - assertEquals(reference.getValue(), compactionMap.get(key)); - } - } - } - - private static long countUUIDs(Set recordIds) { - Set uuids = newHashSet(); - for (RecordId recordId : recordIds) { - uuids.add(recordId.asUUID()); - } - return uuids.size(); - } - - @Test - public void removeGeneration() { - compactionMap1.compress(); - compactionMap2.compress(); - compactionMap3.compress(); - - assertArrayEquals(new long[]{10, 10, 10}, compactionMap.getSegmentCounts()); - assertArrayEquals(new long[] {100, 100, 100}, compactionMap.getRecordCounts()); - - int expectedDepth = 3; - int expectedGeneration = 3; - long expectedSize = countUUIDs(referenceMap.keySet()); - assertEquals(expectedDepth, compactionMap.getDepth()); - assertEquals(expectedSize, sum(compactionMap.getSegmentCounts())); - assertEquals(expectedGeneration, compactionMap.getGeneration()); - - for (Map referenceMap : ImmutableList.of(referenceMap2, referenceMap1, referenceMap3)) { - Set removedUUIDs = newHashSet(); - for (RecordId key : referenceMap.keySet()) { - removedUUIDs.add(key.asUUID()); - } - compactionMap.remove(removedUUIDs); - expectedDepth--; - // Effect of removed generation is only seen after subsequent cons. See OAK-3317 - CompactionMap consed = compactionMap.cons(compactionMap1); - assertEquals(expectedDepth + 1, consed.getDepth()); - expectedSize -= removedUUIDs.size(); - assertEquals(expectedSize, sum(compactionMap.getSegmentCounts())); - assertEquals(expectedGeneration + 1, consed.getGeneration()); - } - - // one final 'cons' to trigger cleanup of empty maps - CompactionMap consed = compactionMap.cons(createCompactionMap(false)); - assertEquals(1, consed.getDepth()); - assertEquals(0, sum(compactionMap.getSegmentCounts())); - assertEquals(expectedGeneration + 1, consed.getGeneration()); - } - - /** - * See OAK-3511 - */ - @Test - public void removeRecentKey() { - compactionMap1.compress(); - - // Find a key not present in the compaction map - RecordId key = newRecordId(store.getTracker(), rnd); - while (compactionMap1.get(key) != null) { - key = newRecordId(store.getTracker(), rnd); - } - - // Add it and immediately remove it, after which is should be gone - compactionMap1.put(key, newRecordId(store.getTracker(), rnd)); - compactionMap1.remove(singleton(key.asUUID())); - assertNull("Compaction map must not contain removed key", compactionMap1.get(key)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java deleted file mode 100644 index b62d743..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompactorTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
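As background for the CompactionMapTest deleted above: a CompactionMap chains one partial record-id map per compaction generation and consults them newest-first, which is what cons(), getDepth() and the removal tests exercise. The toy sketch below mimics only that lookup discipline with plain Java collections; it is a conceptual illustration, not Oak code:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.HashMap;
    import java.util.Map;

    public final class ConsedMapSketch<K, V> {

        // Newest generation first, mirroring CompactionMap.cons(...)
        private final Deque<Map<K, V>> generations = new ArrayDeque<>();

        public ConsedMapSketch<K, V> cons(Map<K, V> generation) {
            generations.addFirst(new HashMap<>(generation));
            return this;
        }

        // A lookup walks the generations from newest to oldest.
        public V get(K key) {
            for (Map<K, V> generation : generations) {
                V value = generation.get(key);
                if (value != null) {
                    return value;
                }
            }
            return null;
        }

        public int getDepth() {
            return generations.size();
        }
    }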
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.junit.Assert.assertFalse; - -import java.io.IOException; - -import com.google.common.base.Suppliers; -import junit.framework.Assert; -import org.apache.jackrabbit.oak.Oak; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.security.OpenSecurityProvider; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class CompactorTest { - - private SegmentStore segmentStore; - - @Before - public void openSegmentStore() throws IOException { - segmentStore = new MemoryStore(); - } - - @After - public void closeSegmentStore() { - segmentStore.close(); - } - - @Test - public void testCompactor() throws Exception { - NodeStore store = SegmentNodeStore.builder(segmentStore).build(); - init(store); - - Compactor compactor = new Compactor(segmentStore.getTracker()); - addTestContent(store, 0); - - NodeState initial = store.getRoot(); - SegmentNodeState after = compactor - .compact(initial, store.getRoot(), initial); - Assert.assertEquals(store.getRoot(), after); - - addTestContent(store, 1); - after = compactor.compact(initial, store.getRoot(), initial); - Assert.assertEquals(store.getRoot(), after); - } - - @Test - public void testCancel() throws Throwable { - - // Create a Compactor that will cancel itself as soon as possible. The - // early cancellation is the reason why the returned SegmentNodeState - // doesn't have the child named "b". 
- - NodeStore store = SegmentNodeStore.builder(segmentStore).build(); - Compactor compactor = new Compactor(segmentStore.getTracker(), Suppliers.ofInstance(true)); - SegmentNodeState sns = compactor.compact(store.getRoot(), addChild(store.getRoot(), "b"), store.getRoot()); - assertFalse(sns.hasChildNode("b")); - } - - private NodeState addChild(NodeState current, String name) { - NodeBuilder builder = current.builder(); - builder.child(name); - return builder.getNodeState(); - } - - private static void init(NodeStore store) { - new Oak(store).with(new OpenSecurityProvider()) - .createContentRepository(); - } - - private static void addTestContent(NodeStore store, int index) - throws CommitFailedException { - NodeBuilder builder = store.getRoot().builder(); - builder.child("test" + index); - builder.child("child" + index); - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompareAgainstBaseStateTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompareAgainstBaseStateTest.java deleted file mode 100644 index fd78f73..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/CompareAgainstBaseStateTest.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.easymock.EasyMock.createControl; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.verify; - -import java.io.IOException; - -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; -import org.junit.Before; -import org.junit.Test; - -/** - * Test case for segment node state comparisons. 
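The compareAgainstBaseState contract verified by the following deleted test is plain oak-core API and remains available after the module removal. A minimal usage sketch with public types only (the class name is illustrative):

    import org.apache.jackrabbit.oak.api.PropertyState;
    import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
    import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeState;

    public final class CompareSketch {

        public static void main(String[] args) {
            NodeBuilder builder = EmptyNodeState.EMPTY_NODE.builder();
            builder.setProperty("foo", "abc");
            NodeState before = builder.getNodeState();

            builder.setProperty("foo", "xyz");
            builder.child("baz");
            NodeState after = builder.getNodeState();

            // DefaultNodeStateDiff answers true everywhere; each override
            // returning true asks for the comparison to continue.
            after.compareAgainstBaseState(before, new DefaultNodeStateDiff() {
                @Override
                public boolean propertyChanged(PropertyState b, PropertyState a) {
                    System.out.println("changed: " + a.getName());
                    return true;
                }

                @Override
                public boolean childNodeAdded(String name, NodeState a) {
                    System.out.println("added child: " + name);
                    return true;
                }
            });
        }
    }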
- */ -public class CompareAgainstBaseStateTest { - - private final NodeStateDiff diff = - createControl().createMock("diff", NodeStateDiff.class); - - private NodeBuilder builder; - - public CompareAgainstBaseStateTest() throws IOException { - builder = new MemoryStore().getTracker().getWriter().writeNode(EMPTY_NODE).builder(); - } - - @Before - public void setUp() { - builder.setProperty("foo", "abc"); - builder.setProperty("bar", 123); - builder.child("baz"); - } - - @Test - public void testSameState() { - NodeState node = builder.getNodeState(); - - replay(diff); - - node.compareAgainstBaseState(node, diff); - verify(diff); - } - - @Test - public void testEqualState() { - NodeState before = builder.getNodeState(); - NodeState after = before.builder().getNodeState(); - - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testPropertyAdded() { - NodeState before = builder.getNodeState(); - builder = before.builder(); - builder.setProperty("test", "test"); - NodeState after = builder.getNodeState(); - - expect(diff.propertyAdded(after.getProperty("test"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testPropertyChanged() { - NodeState before = builder.getNodeState(); - builder = before.builder(); - builder.setProperty("foo", "test"); - NodeState after = builder.getNodeState(); - - expect(diff.propertyChanged( - before.getProperty("foo"), after.getProperty("foo"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testPropertyDeleted() { - NodeState before = builder.getNodeState(); - builder = before.builder(); - builder.removeProperty("foo"); - NodeState after = builder.getNodeState(); - - expect(diff.propertyDeleted(before.getProperty("foo"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testChildNodeAdded() { - NodeState before = builder.getNodeState(); - builder = before.builder(); - builder.child("test"); - NodeState after = builder.getNodeState(); - - expect(diff.childNodeAdded("test", after.getChildNode("test"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testChildNodeChanged() { - NodeState before = builder.getNodeState(); - builder.child("baz").setProperty("test", "test"); - NodeState after = builder.getNodeState(); - - expect(diff.childNodeChanged( - "baz", before.getChildNode("baz"), after.getChildNode("baz"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testChildNodeDeleted() { - NodeState before = builder.getNodeState(); - builder.getChildNode("baz").remove(); - NodeState after = builder.getNodeState(); - - expect(diff.childNodeDeleted("baz", before.getChildNode("baz"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - - @Test - public void testChildNodeDeletedAndMultipleAdded() { - NodeState before = builder.getNodeState(); - builder.getChildNode("baz").remove(); - builder.child("x"); - builder.child("y"); - NodeState after = builder.getNodeState(); - - expect(diff.childNodeDeleted("baz", before.getChildNode("baz"))).andReturn(true); - expect(diff.childNodeAdded("x", after.getChildNode("x"))).andReturn(true); - expect(diff.childNodeAdded("y", 
after.getChildNode("y"))).andReturn(true); - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ExternalBlobIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ExternalBlobIT.java deleted file mode 100644 index 928093d..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ExternalBlobIT.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.util.List; -import java.util.Random; - -import javax.annotation.Nonnull; - -import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.core.data.FileDataStore; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.blob.ReferenceCollector; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; -import org.apache.jackrabbit.oak.plugins.memory.AbstractBlob; -import org.apache.jackrabbit.oak.plugins.segment.file.FileBlob; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.blob.BlobOptions; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class ExternalBlobIT { - - private SegmentStore store; - private SegmentNodeStore nodeStore; - private FileBlob fileBlob; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @BeforeClass - public static void assumptions() { - assumeTrue(getFixtures().contains(SEGMENT_MK)); - } - - @Test @Ignore("would need a FileBlobStore for this") - public void testFileBlob() throws Exception { - nodeStore 
= getNodeStore(new TestBlobStore()); - testCreateAndRead(getFileBlob()); - } - - @Test - public void testDataStoreBlob() throws Exception { - FileDataStore fds = createFileDataStore(); - DataStoreBlobStore dbs = new DataStoreBlobStore(fds); - nodeStore = getNodeStore(dbs); - - // Test for a Blob which gets inlined - Blob b1 = testCreateAndRead(createBlob(fds.getMinRecordLength() - 2)); - assertTrue(b1 instanceof SegmentBlob); - assertNull(((SegmentBlob) b1).getBlobId()); - - // Test for a Blob which needs to be pushed to the BlobStore - byte[] data2 = new byte[Segment.MEDIUM_LIMIT + 1]; - new Random().nextBytes(data2); - Blob b2 = testCreateAndRead(nodeStore.createBlob(new ByteArrayInputStream(data2))); - assertTrue(b2 instanceof SegmentBlob); - assertNotNull(b2.getReference()); - assertEquals(b2.getContentIdentity(), ((SegmentBlob) b2).getBlobId()); - - InputStream is = dbs.getInputStream(((SegmentBlob) b2).getBlobId()); - assertTrue(IOUtils.contentEquals(new ByteArrayInputStream(data2), is)); - is.close(); - } - - @Test - public void testNullBlobId() throws Exception { - FileDataStore fds = createFileDataStore(); - DataStoreBlobStore dbs = new DataStoreBlobStore(fds); - nodeStore = getNodeStore(dbs); - - NodeBuilder nb = nodeStore.getRoot().builder(); - NodeBuilder cb = nb.child("hello"); - cb.setProperty("blob1", createBlob(Segment.MEDIUM_LIMIT - 1)); - - int noOfBlobs = 4000; - for (int i = 0; i < noOfBlobs; i++) { - cb.setProperty("blob" + i, createBlob(Segment.MEDIUM_LIMIT + 1)); - } - - cb.setProperty("anotherBlob2", createBlob(Segment.MEDIUM_LIMIT + 1)); - cb.setProperty("anotherBlob3", createBlob(Segment.MEDIUM_LIMIT + 1)); - nodeStore.merge(nb, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - final List<String> references = Lists.newArrayList(); - store.getTracker().collectBlobReferences(new ReferenceCollector() { - @Override - public void addReference(String reference, String nodeId) { - assertNotNull(reference); - references.add(reference); - } - }); - - assertEquals(noOfBlobs + 2, references.size()); - } - - private Blob testCreateAndRead(Blob blob) throws Exception { - NodeState state = nodeStore.getRoot().getChildNode("hello"); - if (!state.exists()) { - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.child("hello"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.getChildNode("hello").setProperty("world", blob); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - state = nodeStore.getRoot().getChildNode("hello"); - Blob readBlob = state.getProperty("world").getValue(Type.BINARY); - - assertTrue("Blob written and read must be equal", - AbstractBlob.equal(blob, readBlob)); - return readBlob; - } - - @After - public void close() throws IOException { - if (store != null) { - store.close(); - } - } - - protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws Exception { - if (nodeStore == null) { - store = FileStore.builder(getWorkDir()).withBlobStore(blobStore).withMaxFileSize(256).withMemoryMapping(false).build(); - nodeStore = SegmentNodeStore.builder(store).build(); - } - return nodeStore; - } - - private Blob createBlob(int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - - private FileDataStore createFileDataStore() { - FileDataStore fds = new FileDataStore(); - fds.setMinRecordLength(4092); - fds.init(getWorkDir().getAbsolutePath()); - return fds; - } - - private File getWorkDir() { -
return folder.getRoot(); - } - - private FileBlob getFileBlob() throws IOException { - if (fileBlob == null) { - File file = folder.newFile(); - - byte[] data = new byte[2345]; - new Random().nextBytes(data); - FileUtils.writeByteArrayToFile(file, data); - - fileBlob = new FileBlob(file.getPath()); - } - return fileBlob; - } - - private class TestBlobStore implements BlobStore { - @Override - public String writeBlob(InputStream in) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public String writeBlob(InputStream in, BlobOptions options) throws IOException { - return writeBlob(in); - } - - @Override - public int readBlob(String blobId, long pos, byte[] buff, int off, int length) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public long getBlobLength(String blobId) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public InputStream getInputStream(String blobId) throws IOException { - if(blobId.equals(fileBlob.getReference())){ - return fileBlob.getNewStream(); - } - return null; - } - - @Override - public String getBlobId(@Nonnull String reference) { - return reference; - } - - @Override - public String getReference(@Nonnull String blobId) { - return blobId; - } - } - - @Test - public void testSize() throws Exception { - FileDataStore fds = createFileDataStore(); - DataStoreBlobStore dbs = new DataStoreBlobStore(fds); - nodeStore = getNodeStore(dbs); - - int size = Segment.MEDIUM_LIMIT + 1; - byte[] data2 = new byte[size]; - new Random().nextBytes(data2); - - Blob b = nodeStore.createBlob(new ByteArrayInputStream(data2)); - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.child("hello").setProperty("world", b); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - - PropertyState ps = nodeStore.getRoot().getChildNode("hello").getProperty("world"); - // world = {2318851547697882338 bytes} - - assertEquals(size, ps.size()); - // assertEquals("{" + size + " bytes}", ps.toString()); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/HeavyWriteIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/HeavyWriteIT.java deleted file mode 100644 index e0aee36..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/HeavyWriteIT.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
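The ExternalBlobIT deleted above wired its SegmentNodeStore through the removed oak-segment builders (FileStore.builder / SegmentNodeStore.builder). For reference, a minimal sketch of the same wiring on the oak-segment-tar API that the rest of this change migrates to; the class and method names of the sketch itself are illustrative and not part of the patch:

    import java.io.File;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
    import org.apache.jackrabbit.oak.spi.blob.BlobStore;

    class SegmentTarWiring {
        // Sketch only: oak-segment-tar equivalent of the deleted getNodeStore(BlobStore) helper
        static SegmentNodeStore newNodeStore(File workDir, BlobStore blobStore) throws Exception {
            FileStore store = FileStoreBuilder.fileStoreBuilder(workDir) // replaces FileStore.builder(...)
                    .withBlobStore(blobStore)
                    .withMaxFileSize(256)
                    .withMemoryMapping(false)
                    .build();
            return SegmentNodeStoreBuilders.builder(store).build();      // replaces SegmentNodeStore.builder(...)
        }
    }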
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.jackrabbit.oak.commons.CIHelper.travis; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_OLD; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.annotation.Nonnull; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.commons.FixturesHelper; -import org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class HeavyWriteIT { - private static final Set<Fixture> FIXTURES = FixturesHelper.getFixtures(); - - private final boolean usePersistedMap; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Parameterized.Parameters(name="usePersistedMap: {0}") - public static List<Boolean[]> fixtures() { - return ImmutableList.of(new Boolean[] {true}, new Boolean[] {false}); - } - - public HeavyWriteIT(boolean usePersistedMap) { - this.usePersistedMap = usePersistedMap; - } - - @BeforeClass - public static void checkFixtures() { - assumeTrue(!travis()); // FIXME OAK-2375.
Often fails on Travis - assumeTrue(FIXTURES.contains(SEGMENT_MK)); - } - - @Test - public void heavyWrite() throws Exception { - final FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(128).withMemoryMapping(false).build(); - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(store).build(); - CompactionStrategy custom = new CompactionStrategy(false, false, - CLEAN_OLD, 30000, (byte) 0) { - @Override - public boolean compacted(@Nonnull Callable setHead) throws Exception { - return nodeStore.locked(setHead); - } - }; - custom.setPersistCompactionMap(usePersistedMap); - store.setCompactionStrategy(custom); - - int writes = 100; - final AtomicBoolean run = new AtomicBoolean(true); - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - for (int k = 1; run.get(); k++) { - store.gc(); - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } - } - } - }); - thread.start(); - - try { - for (int k = 1; k<=writes; k++) { - NodeBuilder root = nodeStore.getRoot().builder(); - NodeBuilder test = root.setChildNode("test"); - createNodes(nodeStore, test, 10, 2); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - root = nodeStore.getRoot().builder(); - root.getChildNode("test").remove(); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - } finally { - run.set(false); - thread.join(); - store.close(); - } - } - - private static void createNodes(NodeStore nodeStore, NodeBuilder builder, int count, int depth) throws IOException { - if (depth > 0) { - for (int k = 0; k < count; k++) { - NodeBuilder child = builder.setChildNode("node" + k); - createProperties(nodeStore, child, count); - createNodes(nodeStore, child, count, depth - 1); - } - } - } - - private static void createProperties(NodeStore nodeStore, NodeBuilder builder, int count) throws IOException { - for (int k = 0; k < count; k++) { - builder.setProperty("property-" + k, createBlob(nodeStore, 100000)); - } - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/InitializerTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/InitializerTest.java deleted file mode 100644 index 908c048..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/InitializerTest.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import java.io.IOException; - -import com.google.common.collect.ImmutableMap; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.security.SecurityProviderImpl; -import org.apache.jackrabbit.oak.spi.security.ConfigurationParameters; -import org.apache.jackrabbit.oak.spi.security.user.UserConfiguration; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Test; - -public class InitializerTest { - - @Test - public void testInitializerSegment() throws CommitFailedException, IOException { - NodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - - NodeBuilder builder = store.getRoot().builder(); - new InitialContent().initialize(builder); - - SecurityProviderImpl provider = new SecurityProviderImpl( - ConfigurationParameters.of(ImmutableMap.of(UserConfiguration.NAME, - ConfigurationParameters.of(ImmutableMap.of("anonymousId", "anonymous", - "adminId", "admin", - "usersPath", "/home/users", - "groupsPath", "/home/groups", - "defaultDepth", "1"))))); - provider.getConfiguration(UserConfiguration.class).getWorkspaceInitializer().initialize( - builder, "default"); - builder.getNodeState(); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MapRecordTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MapRecordTest.java deleted file mode 100644 index 532a32a..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MapRecordTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Sets.newHashSet; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.easymock.EasyMock.createControl; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.verify; - -import java.io.IOException; -import java.util.Set; -import java.util.regex.Pattern; - -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; -import org.junit.Test; - -import com.google.common.collect.Sets; - -/** - * Test case for segment node state comparisons. 
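The InitializerTest removed above only exercised InitialContent plus the security workspace initializer against a segment store; the InitialContent part of that scenario runs unchanged on any NodeStore. A minimal sketch against MemoryNodeStore, which is already used elsewhere in this change (the sketch's class name is illustrative):

    import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
    import org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    class InitialContentSketch {
        static void run() {
            NodeStore store = new MemoryNodeStore();
            NodeBuilder builder = store.getRoot().builder();
            new InitialContent().initialize(builder); // seeds the default repository content
            builder.getNodeState();                   // materialize, as the deleted test did
        }
    }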
- */ -public class MapRecordTest { - - private final NodeStateDiff diff = - createControl().createMock("diff", NodeStateDiff.class); - - private NodeBuilder builder; - - public MapRecordTest() throws IOException { - builder = new MemoryStore().getTracker().getWriter().writeNode(EMPTY_NODE).builder(); - } - - @Test - public void testOak1104() { - Pattern pattern = Pattern.compile(", "); - Set beforeNames = newHashSet(pattern.split( - "_b_Lucene41_0.doc, _b.fdx, _b.fdt, segments_34, _b_4.del," - + " _b_Lucene41_0.pos, _b.nvm, _b.nvd, _b.fnm, _3n.si," - + " _b_Lucene41_0.tip, _b_Lucene41_0.tim, _3n.cfe," - + " segments.gen, _3n.cfs, _b.si")); - Set afterNames = newHashSet(pattern.split( - "_b_Lucene41_0.pos, _3k.cfs, _3j_1.del, _b.nvm, _b.nvd," - + " _3d.cfe, _3d.cfs, _b.fnm, _3j.si, _3h.si, _3i.cfe," - + " _3i.cfs, _3e_2.del, _3f.si, _b_Lucene41_0.tip," - + " _b_Lucene41_0.tim, segments.gen, _3e.cfe, _3e.cfs," - + " _b.si, _3g.si, _3l.si, _3i_1.del, _3d_3.del, _3e.si," - + " _3d.si, _b_Lucene41_0.doc, _3h_2.del, _3i.si, _3k_1.del," - + " _3j.cfe, _3j.cfs, _b.fdx, _b.fdt, _3g_1.del, _3k.si," - + " _3l.cfe, _3l.cfs, segments_33, _3f_1.del, _3h.cfe," - + " _3h.cfs, _b_4.del, _3f.cfe, _3f.cfs, _3g.cfe, _3g.cfs")); - - for (String name : beforeNames) { - builder.setChildNode(name); - } - NodeState before = builder.getNodeState(); - - for (String name : Sets.difference(beforeNames, afterNames)) { - builder.getChildNode(name).remove(); - } - for (String name : Sets.difference(afterNames, beforeNames)) { - builder.setChildNode(name); - } - NodeState after = builder.getNodeState(); - - for (String name : Sets.difference(beforeNames, afterNames)) { - expect(diff.childNodeDeleted(name, before.getChildNode(name))).andReturn(true); - } - for (String name : Sets.difference(afterNames, beforeNames)) { - expect(diff.childNodeAdded(name, after.getChildNode(name))).andReturn(true); - } - replay(diff); - - after.compareAgainstBaseState(before, diff); - verify(diff); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MergeTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MergeTest.java deleted file mode 100644 index 9ebcd3a..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/MergeTest.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
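MapRecordTest (deleted above) verified child-node diffing across map records with an EasyMock NodeStateDiff. The same verification pattern, sketched store-independently with DefaultNodeStateDiff; this is an illustration of the idiom, not part of the patch:

    import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;

    import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeState;

    class ChildDiffSketch {
        static void run() {
            NodeBuilder builder = EMPTY_NODE.builder();
            builder.setChildNode("a");
            NodeState before = builder.getNodeState();
            builder.getChildNode("a").remove();
            builder.setChildNode("b");
            NodeState after = builder.getNodeState();

            // compareAgainstBaseState reports exactly the added and deleted children
            after.compareAgainstBaseState(before, new DefaultNodeStateDiff() {
                @Override
                public boolean childNodeAdded(String name, NodeState added) {
                    assert "b".equals(name);
                    return true;
                }

                @Override
                public boolean childNodeDeleted(String name, NodeState deleted) {
                    assert "a".equals(name);
                    return true;
                }
            });
        }
    }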
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.fail; - -import java.io.IOException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.commit.CommitHook; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Test; - -public class MergeTest { - - @Test - public void testSequentialMerge() throws CommitFailedException, IOException { - NodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - - assertFalse(store.getRoot().hasProperty("foo")); - assertFalse(store.getRoot().hasProperty("bar")); - - NodeBuilder a = store.getRoot().builder(); - a.setProperty("foo", "abc"); - store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertTrue(store.getRoot().hasProperty("foo")); - assertFalse(store.getRoot().hasProperty("bar")); - - NodeBuilder b = store.getRoot().builder(); - b.setProperty("bar", "xyz"); - store.merge(b, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertTrue(store.getRoot().hasProperty("foo")); - assertTrue(store.getRoot().hasProperty("bar")); - } - - @Test - public void testOptimisticMerge() throws CommitFailedException, IOException { - NodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - - NodeBuilder a = store.getRoot().builder(); - a.setProperty("foo", "abc"); - - NodeBuilder b = store.getRoot().builder(); - b.setProperty("bar", "xyz"); - - assertFalse(store.getRoot().hasProperty("foo")); - assertFalse(store.getRoot().hasProperty("bar")); - - store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertTrue(store.getRoot().hasProperty("foo")); - assertFalse(store.getRoot().hasProperty("bar")); - - store.merge(b, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - assertTrue(store.getRoot().hasProperty("foo")); - assertTrue(store.getRoot().hasProperty("bar")); - } - - @Test - public void testPessimisticMerge() throws Exception { - final SegmentNodeStore store = SegmentNodeStore.builder(new MemoryStore()).build(); - final Semaphore semaphore = new Semaphore(0); - final AtomicBoolean running = new AtomicBoolean(true); - - Thread background = new Thread() { - @Override - public void run() { - for (int i = 0; running.get(); i++) { - try { - NodeBuilder a = store.getRoot().builder(); - a.setProperty("foo", "abc" + i); - store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - semaphore.release(); - } catch (CommitFailedException e) { - fail(); - } - } - } - }; - background.start(); - - // wait for the first commit - semaphore.acquire(); - - assertTrue(store.getRoot().hasProperty("foo")); - assertFalse(store.getRoot().hasProperty("bar")); - - NodeBuilder b = store.getRoot().builder(); - b.setProperty("bar", "xyz"); - store.setMaximumBackoff(100); - store.merge(b, new CommitHook() { - @Override @Nonnull - public NodeState processCommit( - NodeState before, NodeState after, CommitInfo info) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - fail(); - } - return after; - } - }, CommitInfo.EMPTY); - - 
assertTrue(store.getRoot().hasProperty("foo")); - assertTrue(store.getRoot().hasProperty("bar")); - - running.set(false); - background.join(); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMapTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMapTest.java deleted file mode 100644 index 65dd840..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/PartialCompactionMapTest.java +++ /dev/null @@ -1,415 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Iterables.get; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static junit.framework.Assert.assertTrue; -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; -import static org.apache.jackrabbit.oak.commons.benchmark.MicroBenchmark.run; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.newValidOffset; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.randomRecordIdMap; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assume.assumeTrue; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.UUID; - -import com.google.common.collect.ImmutableList; -import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; -import org.apache.jackrabbit.oak.commons.benchmark.MicroBenchmark.Benchmark; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *

- * This is a unit test + benchmark test for all the compaction map
- * implementations.
- *
- * The benchmark tests are disabled by default; to run one of them you
- * need to set the specific {@code benchmark.*} system property:
- * {@code mvn test -Dtest.opts.memory=-Xmx5G -Dtest=PartialCompactionMapTest -Dbenchmark.benchLargeMap=true -Dbenchmark.benchPut=true -Dbenchmark.benchGet=true}

- */ -@RunWith(Parameterized.class) -public class PartialCompactionMapTest { - private static final Logger log = LoggerFactory.getLogger(PartialCompactionMapTest.class); - private static final int SEED = Integer.getInteger("SEED", new Random().nextInt()); - - private final Random rnd = new Random(SEED); - private final boolean usePersistedMap; - - private FileStore segmentStore; - - private Map reference; - private PartialCompactionMap map; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Parameterized.Parameters - public static List fixtures() { - return ImmutableList.of(new Boolean[] {true}, new Boolean[] {false}); - } - - public PartialCompactionMapTest(boolean usePersistedMap) { - this.usePersistedMap = usePersistedMap; - } - - @Before - public void setup() throws Exception { - segmentStore = FileStore.builder(folder.getRoot()).build(); - } - - @After - public void tearDown() { - segmentStore.close(); - } - - private SegmentTracker getTracker() { - return segmentStore.getTracker(); - } - - private PartialCompactionMap createCompactionMap() { - SegmentWriter writer = new SegmentWriter(segmentStore, V_11, ""); - if (usePersistedMap) { - return new PersistedCompactionMap(segmentStore.getTracker()); - } else { - return new InMemoryCompactionMap(segmentStore.getTracker()); - } - } - - private void addAll(Map toAdd) { - assert map != null; - for (Entry tuple : toAdd.entrySet()) { - if (reference != null) { - reference.put(tuple.getKey(), tuple.getValue()); - } - map.put(tuple.getKey(), tuple.getValue()); - } - } - - private void addRandomEntries(int segmentCount, int entriesPerSegment) { - assert map != null; - for (int k = 0; k < segmentCount / 1000; k++) { - addAll(randomRecordIdMap(rnd, getTracker(), 1000, entriesPerSegment)); - } - addAll(randomRecordIdMap(rnd, getTracker(), segmentCount % 1000, entriesPerSegment)); - } - - private void removeRandomEntries(int count) { - assert reference != null; - assert map != null; - Set remove = newHashSet(); - for (int k = 0; k < count && !reference.isEmpty(); k++) { - int j = rnd.nextInt(reference.size()); - remove.add(get(reference.keySet(), j).getSegmentId()); - } - - Set removeUUIDs = newHashSet(); - for (SegmentId sid : remove) { - removeUUIDs.add(new UUID(sid.getMostSignificantBits(), sid.getLeastSignificantBits())); - Iterator it = reference.keySet().iterator(); - while (it.hasNext()) { - if (sid.equals(it.next().getSegmentId())) { - it.remove(); - } - } - } - - map.remove(removeUUIDs); - } - - private void checkMap() { - assert reference != null; - assert map != null; - for (Entry entry : reference.entrySet()) { - assertTrue("Failed with seed " + SEED, - map.wasCompactedTo(entry.getKey(), entry.getValue())); - assertFalse("Failed with seed " + SEED, - map.wasCompactedTo(entry.getValue(), entry.getKey())); - } - } - - @Test - public void single() { - map = createCompactionMap(); - RecordId before = RecordId.fromString(getTracker(), "00000000-0000-0000-0000-000000000000.0000"); - RecordId after = RecordId.fromString(getTracker(), "11111111-1111-1111-1111-111111111111.1111"); - - map.put(before, after); - assertEquals(after, map.get(before)); - map.compress(); - assertEquals(after, map.get(before)); - assertEquals(1, map.getRecordCount()); - assertEquals(1, map.getSegmentCount()); - } - - @Test - public void remove() { - map = createCompactionMap(); - RecordId before1 = RecordId.fromString(getTracker(), "00000000-0000-0000-0000-000000000000.0000"); - RecordId before2 = RecordId.fromString(getTracker(), 
"00000000-0000-0000-0000-000000000000.1111"); - RecordId after1 = RecordId.fromString(getTracker(), "11111111-1111-1111-1111-111111111111.0000"); - RecordId after2 = RecordId.fromString(getTracker(), "11111111-1111-1111-1111-111111111111.1111"); - - map.put(before1, after1); - map.compress(); - map.put(before2, after2); - assertEquals(after1, map.get(before1)); - assertEquals(after2, map.get(before2)); - - map.remove(newHashSet(before1.asUUID())); - assertNull(map.get(before1)); - assertNull(map.get(before2)); - assertEquals(0, map.getRecordCount()); - assertEquals(0, map.getSegmentCount()); - } - - private static Set toUUID(Set recordIds) { - Set uuids = newHashSet(); - for (RecordId recordId : recordIds) { - uuids.add(recordId.asUUID()); - } - return uuids; - } - - @Test - public void random() { - int maxSegments = 1000; - int entriesPerSegment = 10; - reference = newHashMap(); - map = createCompactionMap(); - - for (int k = 0; k < 10; k++) { - addRandomEntries(rnd.nextInt(maxSegments) + 1, rnd.nextInt(entriesPerSegment) + 1); - if (!reference.isEmpty()) { - removeRandomEntries(rnd.nextInt(reference.size())); - } - checkMap(); - } - map.compress(); - assertEquals(reference.size(), map.getRecordCount()); - assertEquals(toUUID(reference.keySet()).size(), map.getSegmentCount()); - checkMap(); - } - - private static void assertHeapSize(long size) { - long mem = Runtime.getRuntime().maxMemory(); - assertTrue("Need " + humanReadableByteCount(size) + - ", only found " + humanReadableByteCount(mem), mem >= size); - } - - @Test - public void benchLargeMap() { - assumeTrue(Boolean.getBoolean("benchmark.benchLargeMap")); - assertHeapSize(4000000000L); - - map = createCompactionMap(); - - // check the memory use of really large mappings, 1M compacted segments with 10 records each. 
- Runtime runtime = Runtime.getRuntime(); - for (int i = 0; i < 1000; i++) { - Map ids = randomRecordIdMap(rnd, getTracker(), 10000, 100); - long start = System.nanoTime(); - for (Entry entry : ids.entrySet()) { - map.put(entry.getKey(), entry.getValue()); - } - log.info( - "Bench Large Map #" + - (i + 1) + ": " + (runtime.totalMemory() - runtime.freeMemory()) / - (1024 * 1024) + "MB, " + (System.nanoTime() - start) / 1000000 + "ms"); - } - } - - @Test - public void benchPut() throws Exception { - assumeTrue(Boolean.getBoolean("benchmark.benchPut")); - assertHeapSize(4000000000L); - - run(new PutBenchmark(0, 100)); - run(new PutBenchmark(10, 100)); - run(new PutBenchmark(100, 100)); - run(new PutBenchmark(1000, 100)); - run(new PutBenchmark(10000, 100)); - run(new PutBenchmark(100000, 100)); - run(new PutBenchmark(1000000, 100)); - } - - @Test - public void benchGet() throws Exception { - assumeTrue(Boolean.getBoolean("benchmark.benchGet")); - assertHeapSize(4000000000L); - - run(new GetBenchmark(0, 100)); - run(new GetBenchmark(10, 100)); - run(new GetBenchmark(100, 100)); - run(new GetBenchmark(1000, 100)); - run(new GetBenchmark(10000, 100)); - run(new GetBenchmark(100000, 100)); - run(new GetBenchmark(1000000, 100)); - } - - private abstract static class LoggingBenchmark extends Benchmark { - - @Override - public void result(DescriptiveStatistics statistics) { - log.info("{}", this); - if (statistics.getN() > 0) { - log.info(String - .format("%6s %6s %6s %6s %6s %6s %6s %6s", "min", - "10%", "50%", "90%", "max", "mean", "stdev", "N")); - log.info(String - .format("%6.0f %6.0f %6.0f %6.0f %6.0f %6.0f %6.0f %6d", - statistics.getMin() / 1000000, - statistics.getPercentile(10.0) / 1000000, - statistics.getPercentile(50.0) / 1000000, - statistics.getPercentile(90.0) / 1000000, - statistics.getMax() / 1000000, - statistics.getMean() / 1000000, - statistics.getStandardDeviation() / 1000000, - statistics.getN())); - } else { - log.info("No results"); - } - } - } - - private class PutBenchmark extends LoggingBenchmark { - private final int segmentCount; - private final int entriesPerSegment; - - private Map putIds; - - public PutBenchmark(int segmentCount, int entriesPerSegment) { - this.segmentCount = segmentCount; - this.entriesPerSegment = entriesPerSegment; - } - - @Override - public void setup() throws IOException { - map = createCompactionMap(); - if (segmentCount > 0) { - addRandomEntries(segmentCount, entriesPerSegment); - } - } - - @Override - public void beforeRun() throws Exception { - putIds = randomRecordIdMap(rnd, getTracker(), 10000 / entriesPerSegment, entriesPerSegment); - } - - @Override - public void run() throws IOException { - for (Entry tuple : putIds.entrySet()) { - map.put(tuple.getKey(), tuple.getValue()); - } - } - - @Override - public String toString() { - return "Put benchmark: SegmentCount=" + segmentCount + ", entriesPerSegment=" + entriesPerSegment; - } - } - - private class GetBenchmark extends LoggingBenchmark { - private final int segmentCount; - private final int entriesPerSegment; - - private final List getCandidateIds = newArrayList(); - private final List getIds = newArrayList(); - - public GetBenchmark(int segmentCount, int entriesPerSegment) { - this.segmentCount = segmentCount; - this.entriesPerSegment = entriesPerSegment; - } - - @Override - public void setup() throws IOException { - map = createCompactionMap(); - reference = new HashMap() { - @Override - public RecordId put(RecordId key, RecordId value) { - // Wow, what a horrendous hack!! 
- if (key.getSegmentId().getMostSignificantBits() % 10000 == 0) { - getCandidateIds.add(key); - } - return null; - } - }; - - addRandomEntries(segmentCount, entriesPerSegment); - map.compress(); - for (int k = 0; k < 10000; k++) { - getCandidateIds.add(new RecordId( - getTracker().newDataSegmentId(), - newValidOffset(rnd, 0, MAX_SEGMENT_SIZE))); - } - } - - @Override - public void beforeRun() throws Exception { - for (int k = 0; k < 10000; k ++) { - getIds.add(getCandidateIds.get(rnd.nextInt(getCandidateIds.size()))); - } - } - - @Override - public void run() { - for (RecordId id : getIds) { - map.get(id); - } - } - - @Override - public void afterRun() throws Exception { - getIds.clear(); - } - - @Override - public String toString() { - return "Get benchmark: segmentCount=" + segmentCount + ", entriesPerSegment=" + entriesPerSegment; - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMapTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMapTest.java deleted file mode 100644 index 6884850..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMapTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
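The LoggingBenchmark base class above condenses each run's nanosecond samples into percentile rows via commons-math3. The reporting idiom in isolation, as a sketch with an illustrative class name:

    import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

    class StatsReportSketch {
        // Sketch: percentile report over nanosecond samples, printed in milliseconds
        static void report(DescriptiveStatistics stats) {
            if (stats.getN() > 0) {
                System.out.printf("%6.0f %6.0f %6.0f %6d%n",
                        stats.getPercentile(50.0) / 1_000_000, // median
                        stats.getPercentile(90.0) / 1_000_000, // p90
                        stats.getMax() / 1_000_000,            // max
                        stats.getN());
            } else {
                System.out.println("No results");
            }
        }
    }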
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ALIGN_BITS; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.encode; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.newValidOffset; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; - -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.junit.Test; - -public class RecordIdMapTest { - - @Test - public void testEmpty() { - RecordIdMap map = new RecordIdMap(); - assertFalse(map.containsKey((short) 0)); - assertNull(map.get((short) 0)); - assertEquals(0, map.size()); - try { - map.getKey(0); - fail("Expected AIOBE"); - } catch (ArrayIndexOutOfBoundsException ignored) {} - try { - map.getRecordId(0); - fail("Expected AIOBE"); - } catch (ArrayIndexOutOfBoundsException ignored) {} - } - - @Test - public void testRecordIdMap() throws IOException { - int maxSegments = 1000; - int maxEntriesPerSegment = 10; - int seed = new Random().nextInt(); - Random r = new Random(seed); - - SegmentTracker tracker = new MemoryStore().getTracker(); - RecordIdMap map = new RecordIdMap(); - Map reference = newHashMap(); - int segments = r.nextInt(maxSegments); - for (int i = 0; i < segments; i++) { - SegmentId id = tracker.newDataSegmentId(); - int n = r.nextInt(maxEntriesPerSegment); - int offset = MAX_SEGMENT_SIZE; - for (int j = 0; j < n; j++) { - offset = newValidOffset(r, (n - j) << RECORD_ALIGN_BITS, offset); - RecordId record = new RecordId(id, offset); - reference.put(encode(record.getOffset()), record); - } - } - for (Entry entry : reference.entrySet()) { - map.put(entry.getKey(), entry.getValue()); - } - - assertEquals("Failed with seed " + seed, reference.size(), map.size()); - for (Entry entry : reference.entrySet()) { - short key = entry.getKey(); - assertTrue("Failed with seed " + seed, map.containsKey(key)); - - RecordId expected = entry.getValue(); - RecordId actual = map.get(key); - assertEquals("Failed with seed " + seed, expected, actual); - } - - for (int k = 0; k < map.size(); k++) { - short key = map.getKey(k); - RecordId expected = reference.get(key); - RecordId actual = map.get(key); - assertEquals("Failed with seed " + seed, expected, actual); - assertEquals("Failed with seed " + seed, expected, map.getRecordId(k)); - } - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java deleted file mode 100644 index ab742d7..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
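RecordIdMapTest above follows the reproducible-randomness pattern that recurs throughout these deleted tests: draw a random seed, then carry it in every assertion message so a failing run can be replayed exactly. The pattern in isolation, as a sketch on unrelated data (names and the list round-trip are illustrative):

    import static org.junit.Assert.assertEquals;

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    class SeededRandomSketch {
        static void check() {
            int seed = new Random().nextInt(); // could also come from -DSEED, as in PartialCompactionMapTest
            Random r = new Random(seed);
            List<Integer> data = new ArrayList<>();
            int n = r.nextInt(1000);
            for (int i = 0; i < n; i++) {
                data.add(r.nextInt());
            }
            List<Integer> copy = new ArrayList<>(data);
            Collections.reverse(copy);
            Collections.reverse(copy);
            // every assertion carries the seed, so the exact run can be reproduced
            assertEquals("Failed with seed " + seed, data, copy);
        }
    }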
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Maps.newHashMap; -import static java.util.Collections.singletonList; -import static junit.framework.Assert.assertNotNull; -import static junit.framework.Assert.fail; -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.api.Type.STRINGS; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.ListRecord.LEVEL_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.readString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableMap; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.junit.Test; - -public class RecordTest { - - private final String hello = "Hello, World!"; - - private final byte[] bytes = hello.getBytes(Charsets.UTF_8); - - private final SegmentStore store; - - private final SegmentWriter writer; - - private final Random random = new Random(0xcafefaceL); - - public RecordTest() throws IOException { - store = new MemoryStore(); - writer = store.getTracker().getWriter(); - } - - @Test - public void testBlockRecord() throws IOException { - RecordId blockId = writer.writeBlock(bytes, 0, bytes.length); - BlockRecord block = new BlockRecord(blockId, bytes.length); - - // Check reading with all valid positions and lengths - for (int n = 1; n < bytes.length; n++) { - for (int i = 0; i + n <= bytes.length; i++) { - Arrays.fill(bytes, i, i + n, (byte) '.'); - assertEquals(n, block.read(i, bytes, i, n)); - assertEquals(hello, new String(bytes, Charsets.UTF_8)); - } - } - - // Check reading with a too long length - byte[] large = new byte[bytes.length * 2]; - assertEquals(bytes.length, block.read(0, large, 0, large.length)); - assertEquals(hello, new String(large, 0, bytes.length, Charsets.UTF_8)); - } - - @Test - public void testListRecord() throws IOException { - RecordId blockId = writer.writeBlock(bytes, 0, bytes.length); - - ListRecord one = writeList(1, blockId); - ListRecord level1 = writeList(LEVEL_SIZE, blockId); - ListRecord level1p = writeList(LEVEL_SIZE + 1, blockId); - ListRecord level2 = writeList(LEVEL_SIZE * LEVEL_SIZE, blockId); - ListRecord level2p = writeList(LEVEL_SIZE * LEVEL_SIZE + 1, blockId); - - assertEquals(1, one.size()); - assertEquals(blockId, 
one.getEntry(0)); - assertEquals(LEVEL_SIZE, level1.size()); - assertEquals(blockId, level1.getEntry(0)); - assertEquals(blockId, level1.getEntry(LEVEL_SIZE - 1)); - assertEquals(LEVEL_SIZE + 1, level1p.size()); - assertEquals(blockId, level1p.getEntry(0)); - assertEquals(blockId, level1p.getEntry(LEVEL_SIZE)); - assertEquals(LEVEL_SIZE * LEVEL_SIZE, level2.size()); - assertEquals(blockId, level2.getEntry(0)); - assertEquals(blockId, level2.getEntry(LEVEL_SIZE * LEVEL_SIZE - 1)); - assertEquals(LEVEL_SIZE * LEVEL_SIZE + 1, level2p.size()); - assertEquals(blockId, level2p.getEntry(0)); - assertEquals(blockId, level2p.getEntry(LEVEL_SIZE * LEVEL_SIZE)); - - int count = 0; - for (RecordId entry : level2p.getEntries()) { - assertEquals(blockId, entry); - assertEquals(blockId, level2p.getEntry(count)); - count++; - } - assertEquals(LEVEL_SIZE * LEVEL_SIZE + 1, count); - } - - private ListRecord writeList(int size, RecordId id) throws IOException { - List list = Collections.nCopies(size, id); - return new ListRecord(writer.writeList(list), size); - } - - @Test - public void testListWithLotsOfReferences() throws IOException { // OAK-1184 - SegmentTracker factory = store.getTracker(); - List list = newArrayList(); - for (int i = 0; i < 1000; i++) { - list.add(new RecordId(factory.newBulkSegmentId(), 0)); - } - writer.writeList(list); - } - - @Test - public void testStreamRecord() throws IOException { - checkRandomStreamRecord(0); - checkRandomStreamRecord(1); - checkRandomStreamRecord(0x79); - checkRandomStreamRecord(0x80); - checkRandomStreamRecord(0x4079); - checkRandomStreamRecord(0x4080); - checkRandomStreamRecord(SegmentWriter.BLOCK_SIZE); - checkRandomStreamRecord(SegmentWriter.BLOCK_SIZE + 1); - checkRandomStreamRecord(Segment.MAX_SEGMENT_SIZE); - checkRandomStreamRecord(Segment.MAX_SEGMENT_SIZE + 1); - checkRandomStreamRecord(Segment.MAX_SEGMENT_SIZE * 2); - checkRandomStreamRecord(Segment.MAX_SEGMENT_SIZE * 2 + 1); - } - - private void checkRandomStreamRecord(int size) throws IOException { - byte[] source = new byte[size]; - random.nextBytes(source); - - Blob value = writer.writeStream(new ByteArrayInputStream(source)); - InputStream stream = value.getNewStream(); - try { - byte[] b = new byte[349]; // prime number - int offset = 0; - for (int n = stream.read(b); n != -1; n = stream.read(b)) { - for (int i = 0; i < n; i++) { - assertEquals(source[offset + i], b[i]); - } - offset += n; - } - assertEquals(offset, size); - assertEquals(-1, stream.read()); - } finally { - stream.close(); - } - } - - @Test - public void testStringRecord() throws IOException { - RecordId empty = writer.writeString(""); - RecordId space = writer.writeString(" "); - RecordId hello = writer.writeString("Hello, World!"); - - StringBuilder builder = new StringBuilder(); - for (int i = 0; i < 2 * Segment.MAX_SEGMENT_SIZE + 1000; i++) { - builder.append((char) ('0' + i % 10)); - } - RecordId large = writer.writeString(builder.toString()); - - Segment segment = large.getSegmentId().getSegment(); - - assertEquals("", readString(empty)); - assertEquals(" ", readString(space)); - assertEquals("Hello, World!", readString(hello)); - assertEquals(builder.toString(), readString(large)); - } - - @Test - public void testMapRecord() throws IOException { - RecordId blockId = writer.writeBlock(bytes, 0, bytes.length); - - MapRecord zero = writer.writeMap( - null, ImmutableMap.of()); - MapRecord one = writer.writeMap( - null, ImmutableMap.of("one", blockId)); - MapRecord two = writer.writeMap( - null, ImmutableMap.of("one", 
blockId, "two", blockId)); - Map map = newHashMap(); - for (int i = 0; i < 1000; i++) { - map.put("key" + i, blockId); - } - MapRecord many = writer.writeMap(null, map); - - Iterator iterator; - - assertEquals(0, zero.size()); - assertNull(zero.getEntry("one")); - iterator = zero.getEntries().iterator(); - assertFalse(iterator.hasNext()); - - assertEquals(1, one.size()); - assertEquals(blockId, one.getEntry("one").getValue()); - assertNull(one.getEntry("two")); - iterator = one.getEntries().iterator(); - assertTrue(iterator.hasNext()); - assertEquals("one", iterator.next().getName()); - assertFalse(iterator.hasNext()); - - assertEquals(2, two.size()); - assertEquals(blockId, two.getEntry("one").getValue()); - assertEquals(blockId, two.getEntry("two").getValue()); - assertNull(two.getEntry("three")); - iterator = two.getEntries().iterator(); - assertTrue(iterator.hasNext()); - iterator.next(); - assertTrue(iterator.hasNext()); - iterator.next(); - assertFalse(iterator.hasNext()); - - assertEquals(1000, many.size()); - iterator = many.getEntries().iterator(); - for (int i = 0; i < 1000; i++) { - assertTrue(iterator.hasNext()); - assertEquals(blockId, iterator.next().getValue()); - assertEquals(blockId, many.getEntry("key" + i).getValue()); - } - assertFalse(iterator.hasNext()); - assertNull(many.getEntry("foo")); - - Map changes = newHashMap(); - changes.put("key0", null); - changes.put("key1000", blockId); - MapRecord modified = writer.writeMap(many, changes); - assertEquals(1000, modified.size()); - iterator = modified.getEntries().iterator(); - for (int i = 1; i <= 1000; i++) { - assertTrue(iterator.hasNext()); - assertEquals(blockId, iterator.next().getValue()); - assertEquals(blockId, modified.getEntry("key" + i).getValue()); - } - assertFalse(iterator.hasNext()); - assertNull(many.getEntry("foo")); - } - - @Test - public void testMapRemoveNonExisting() throws IOException { - RecordId blockId = writer.writeBlock(bytes, 0, bytes.length); - - Map changes = newHashMap(); - changes.put("one", null); - MapRecord zero = writer.writeMap(null, changes); - assertEquals(0, zero.size()); - } - - @Test - public void testWorstCaseMap() throws IOException { - RecordId blockId = writer.writeBlock(bytes, 0, bytes.length); - Map map = newHashMap(); - char[] key = new char[2]; - for (int i = 0; i <= MapRecord.BUCKETS_PER_LEVEL; i++) { - key[0] = (char) ('A' + i); - key[1] = (char) ('\u1000' - key[0] * 31); - map.put(new String(key), blockId); - } - - MapRecord bad = writer.writeMap(null, map); - - assertEquals(map.size(), bad.size()); - Iterator iterator = bad.getEntries().iterator(); - for (int i = 0; i < map.size(); i++) { - assertTrue(iterator.hasNext()); - assertEquals('\u1000', iterator.next().getName().hashCode()); - } - assertFalse(iterator.hasNext()); - } - - @Test - public void testEmptyNode() throws IOException { - NodeState before = EMPTY_NODE; - NodeState after = writer.writeNode(before); - assertEquals(before, after); - } - - @Test - public void testSimpleNode() throws IOException { - NodeState before = EMPTY_NODE.builder() - .setProperty("foo", "abc") - .setProperty("bar", 123) - .setProperty("baz", Math.PI) - .getNodeState(); - NodeState after = writer.writeNode(before); - assertEquals(before, after); - } - - @Test - public void testDeepNode() throws IOException { - NodeBuilder root = EMPTY_NODE.builder(); - NodeBuilder builder = root; - for (int i = 0; i < 1000; i++) { - builder = builder.child("test"); - } - NodeState before = builder.getNodeState(); - NodeState after = 
writer.writeNode(before); - assertEquals(before, after); - } - - @Test - public void testManyMapDeletes() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - for (int i = 0; i < 1000; i++) { - builder.child("test" + i); - } - NodeState before = writer.writeNode(builder.getNodeState()); - assertEquals(builder.getNodeState(), before); - - builder = before.builder(); - for (int i = 0; i < 900; i++) { - builder.getChildNode("test" + i).remove(); - } - NodeState after = writer.writeNode(builder.getNodeState()); - assertEquals(builder.getNodeState(), after); - } - - @Test - public void testMultiValuedBinaryPropertyAcrossSegments() - throws IOException { - // biggest possible inlined value record - byte[] data = new byte[Segment.MEDIUM_LIMIT - 1]; - random.nextBytes(data); - - // create enough copies of the value to fill a full segment - List blobs = newArrayList(); - while (blobs.size() * data.length < Segment.MAX_SEGMENT_SIZE) { - blobs.add(writer.writeStream(new ByteArrayInputStream(data))); - } - - // write a simple node that'll now be stored in a separate segment - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("test", blobs, BINARIES); - NodeState state = writer.writeNode(builder.getNodeState()); - - // all the blobs should still be accessible, even if they're - // referenced from another segment - for (Blob blob : state.getProperty("test").getValue(BINARIES)) { - try { - blob.getNewStream().close(); - } catch (IllegalStateException e) { - fail("OAK-1374"); - } - } - } - - @Test - public void testStringPrimaryType() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("jcr:primaryType", "foo", STRING); - NodeState state = writer.writeNode(builder.getNodeState()); - assertNotNull(state.getProperty("jcr:primaryType")); - } - - @Test - public void testStringMixinTypes() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("jcr:mixinTypes", singletonList("foo"), STRINGS); - NodeState state = writer.writeNode(builder.getNodeState()); - assertNotNull(state.getProperty("jcr:mixinTypes")); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyserTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyserTest.java deleted file mode 100644 index 0604718..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordUsageAnalyserTest.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
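RecordTest (deleted above) round-tripped node states through the old SegmentWriter; the equivalent write-and-persist check on oak-segment-tar's in-memory store, sketched with the builder API this patch adopts elsewhere (class name illustrative):

    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    class DeepNodeSketch {
        static void run() throws Exception {
            NodeStore store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();
            NodeBuilder root = store.getRoot().builder();
            NodeBuilder b = root;
            for (int i = 0; i < 1000; i++) {
                b = b.child("test"); // a 1000-deep chain, as in the deleted testDeepNode
            }
            store.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); // persists the deep tree
        }
    }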
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Strings.repeat; -import static java.util.Collections.nCopies; -import static org.apache.jackrabbit.oak.api.Type.LONGS; -import static org.apache.jackrabbit.oak.api.Type.NAME; -import static org.apache.jackrabbit.oak.api.Type.NAMES; -import static org.apache.jackrabbit.oak.api.Type.STRINGS; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.ListRecord.LEVEL_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MEDIUM_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_10; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.List; -import java.util.Random; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class RecordUsageAnalyserTest { - private final SegmentVersion segmentVersion; - - private SegmentStore store; - private SegmentWriter writer; - private RecordUsageAnalyser analyser = new RecordUsageAnalyser(); - - @Parameterized.Parameters - public static List fixtures() { - return ImmutableList.of(new SegmentVersion[] {V_10}, new SegmentVersion[] {V_11}); - } - - public RecordUsageAnalyserTest(SegmentVersion segmentVersion) { - this.segmentVersion = segmentVersion; - } - - @Before - public void setup() { - store = mock(SegmentStore.class); - SegmentTracker tracker = new SegmentTracker(store); - when(store.getTracker()).thenReturn(tracker); - writer = new SegmentWriter(store, segmentVersion, ""); - analyser = new RecordUsageAnalyser(); - } - - @Test - public void emptyNode() throws IOException { - SegmentNodeState node = writer.writeNode(EMPTY_NODE); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 0, 4, 3); - } - - @Test - public void nodeWithInt() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("one", 1); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 6, 8, 6); - } - - @Test - public void nodeWithString() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("two", "222"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 8, 8, 6); - } - - @Test - public void nodeWithMultipleProperties() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("one", "11"); - builder.setProperty("two", "22"); - builder.setProperty("three", "33"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - if (segmentVersion == V_11) { - assertSizes(analyser, 0, 18, 23, 10, 6); - } else { - assertSizes(analyser, 0, 0, 23, 16, 12); - } - } - - @Test - public void nodeWithMediumString() throws 
IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("medium", repeat("a", SMALL_LIMIT + 1)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 138, 8, 6); - } - - @Test - public void nodeWithLargeString() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("large", repeat("b", MEDIUM_LIMIT + 1)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 15, 16530, 8, 6); - } - - @Test - public void nodeWithSameString() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("two", "two"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 4, 8, 6); - } - - @Test - public void nodeWithInts() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("multi", ImmutableList.of(1L, 2L, 3L, 4L), LONGS); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 12, 21, 8, 6); - } - - @Test - public void nodeWithManyInts() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("multi", nCopies(LEVEL_SIZE + 1, 1L), LONGS); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 771, 15, 8, 6); - } - - @Test - public void nodeWithManyIntsAndOne() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("multi", nCopies(LEVEL_SIZE + 2, 1L), LONGS); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 777, 15, 8, 6); - } - - @Test - public void nodeWithStrings() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("multi", ImmutableList.of("one", "one", "two", "two", "three"), STRINGS); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 15, 27, 8, 6); - } - - @Test - public void nodeWithBlob() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("blob", createRandomBlob(4)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 10, 8, 6); - } - - @Test - public void nodeWithMediumBlob() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("mediumBlob", createRandomBlob(SMALL_LIMIT + 1)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 142, 8, 6); - } - - @Test - public void nodeWithLargeBlob() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("largeBlob", createRandomBlob(MEDIUM_LIMIT + 1)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 15, 16534, 8, 6); - } - - @Test - public void nodeWithPrimaryType() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("jcr:primaryType", "type", NAME); - - SegmentNodeState node = 
writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 5, 7, 3); - } - - @Test - public void nodeWithMixinTypes() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("jcr:mixinTypes", ImmutableList.of("type1", "type2"), NAMES); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 12, 10, 3); - } - - @Test - public void singleChild() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("child"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 0, 0, 6, 11, 9); - } - - @Test - public void multiChild() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("child1"); - builder.setChildNode("child2"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 24, 0, 14, 8, 12); - } - - @Test - public void manyChild() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - for (int k = 0; k < MapRecord.BUCKETS_PER_LEVEL + 1; k++) { - builder.setChildNode("child" + k); - } - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 457, 0, 254, 8, 105); - } - - @Test - public void changedChild() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("child1"); - builder.setChildNode("child2"); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 24, 0, 14, 8, 12); - - builder = node.builder(); - builder.child("child1").setProperty("p", "q"); - - when(store.containsSegment(node.getRecordId().getSegmentId())).thenReturn(true); - node = (SegmentNodeState) builder.getNodeState(); - - analyser.analyseNode(node.getRecordId()); - assertSizes(analyser, 41, 0, 18, 16, 24); - } - - @Test - public void counts() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("child1"); - builder.setChildNode("child2"); - builder.setProperty("prop", ImmutableList.of("a", "b"), STRINGS); - builder.setProperty("mediumString", repeat("m", SMALL_LIMIT)); - builder.setProperty("longString", repeat("l", MEDIUM_LIMIT)); - builder.setProperty("smallBlob", createRandomBlob(4)); - builder.setProperty("mediumBlob", createRandomBlob(SMALL_LIMIT)); - builder.setProperty("longBlob", createRandomBlob(MEDIUM_LIMIT)); - - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - analyser.analyseNode(node.getRecordId()); - if (segmentVersion == V_11) { - assertCounts(analyser, 1, 5, 6, 1, 1, 1, 0, 10, 1, 1, 2, 3); - } else { - assertCounts(analyser, 1, 3, 6, 1, 1, 1, 0, 10, 1, 1, 2, 3); - } - } - - private static Blob createRandomBlob(int size) { - byte[] bytes = new byte[size]; - new Random().nextBytes(bytes); - return new ArrayBasedBlob(bytes); - } - - private static void assertSizes(RecordUsageAnalyser analyser, - long maps, long lists, long values, long templates, long nodes) { - assertEquals("maps sizes mismatch", maps, analyser.getMapSize()); - assertEquals("lists sizes mismatch", lists, analyser.getListSize()); - assertEquals("value sizes mismatch", values, analyser.getValueSize()); - assertEquals("template sizes mismatch", templates, 
analyser.getTemplateSize()); - assertEquals("nodes sizes mismatch", nodes, analyser.getNodeSize()); - } - - private static void assertCounts(RecordUsageAnalyser analyser, - long mapCount, long listCount, long propertyCount, - long smallBlobCount, long mediumBlobCount, long longBlobCount, long externalBlobCount, - long smallStringCount, long mediumStringCount, long longStringCount, - long templateCount, long nodeCount) { - assertEquals("map count mismatch", mapCount, analyser.getMapCount()); - assertEquals("list count mismatch", listCount, analyser.getListCount()); - assertEquals("property count mismatch", propertyCount, analyser.getPropertyCount()); - assertEquals("small blob count mismatch", smallBlobCount, analyser.getSmallBlobCount()); - assertEquals("medium blob mismatch", mediumBlobCount, analyser.getMediumBlobCount()); - assertEquals("long blob count mismatch", longBlobCount, analyser.getLongBlobCount()); - assertEquals("external blob count mismatch", externalBlobCount, analyser.getExternalBlobCount()); - assertEquals("small string count mismatch", smallStringCount, analyser.getSmallStringCount()); - assertEquals("medium string count mismatch", mediumStringCount, analyser.getMediumStringCount()); - assertEquals("long string count mismatch", longStringCount, analyser.getLongStringCount()); - assertEquals("template count mismatch", templateCount, analyser.getTemplateCount()); - assertEquals("node count mismatch", nodeCount, analyser.getNodeCount()); - - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java deleted file mode 100644 index ecb91d5..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionIT.java +++ /dev/null @@ -1,1059 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.collect.Iterables.get; -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Sets.newConcurrentHashSet; -import static com.google.common.util.concurrent.Futures.addCallback; -import static com.google.common.util.concurrent.Futures.immediateCancelledFuture; -import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; -import static java.lang.Boolean.getBoolean; -import static java.lang.Integer.MAX_VALUE; -import static java.lang.String.valueOf; -import static java.lang.System.getProperty; -import static java.util.concurrent.TimeUnit.MINUTES; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.commons.lang.RandomStringUtils.randomAlphabetic; -import static org.apache.jackrabbit.oak.plugins.segment.CompactionMap.sum; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_OLD; -import static org.junit.Assume.assumeTrue; -import static org.slf4j.helpers.MessageFormatter.arrayFormat; -import static org.slf4j.helpers.MessageFormatter.format; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.lang.management.ManagementFactory; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanRegistrationException; -import javax.management.MBeanServer; -import javax.management.NotCompliantMBeanException; -import javax.management.ObjectName; - -import com.google.common.base.Predicate; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListenableScheduledFuture; -import com.google.common.util.concurrent.ListeningScheduledExecutorService; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.cache.CacheStats; -import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean; -import org.apache.jackrabbit.oak.plugins.commit.ConflictHook; -import org.apache.jackrabbit.oak.plugins.commit.DefaultConflictHandler; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.compaction.DefaultCompactionStrategyMBean; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStoreGCMonitor; -import org.apache.jackrabbit.oak.spi.commit.CommitHook; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.CompositeHook; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import 
org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.apache.jackrabbit.oak.spi.whiteboard.CompositeRegistration; -import org.apache.jackrabbit.oak.spi.whiteboard.Registration; -import org.apache.jackrabbit.oak.stats.Clock; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *
This is a longevity test for SegmentMK compaction for {@code OAK-2849 Improve revision gc on SegmentMK} - * - * The test schedules a number of readers, writers, a compactor and holds some references for a certain time. - * All of which can be interactively modified through the accompanying - * {@link SegmentCompactionITMBean}, the - * {@link org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategyMBean} and the - * {@link org.apache.jackrabbit.oak.plugins.segment.file.GCMonitorMBean}. - * - * The test is disabled by default, to run it you need to set the {@code SegmentCompactionIT} system property: - * {@code mvn test -Dtest=SegmentCompactionIT -Dtest.opts.memory=-Xmx4G} - * - * TODO Leverage longevity test support from OAK-2771 once we have it.
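The interactive knobs this Javadoc mentions are only reachable over JMX while the test is running. A minimal sketch of that interaction, assuming the {@code IT:TYPE=Segment Compaction} object name the test registers in setUp() below and attribute names derived from the MBean's getter/setter pairs (the helper class name here is hypothetical, not part of the deleted sources):

import java.lang.management.ManagementFactory;

import javax.management.Attribute;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative only: this must run inside the test's JVM (for example from a
// helper thread), since the test registers its beans with the platform MBean
// server; a remote jconsole-style client would go through a JMXConnector.
public class SegmentCompactionTuner {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("IT:TYPE=Segment Compaction");

        // Compact every 5 minutes instead of the default 1, and allow more writers.
        server.setAttribute(name, new Attribute("CompactionInterval", 5));
        server.setAttribute(name, new Attribute("MaxWriters", 20));

        // Read back a counter exposed by the same MBean.
        Long commits = (Long) server.getAttribute(name, "CommitCount");
        System.out.println("Commits so far: " + commits);
    }
}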
- */ -public class SegmentCompactionIT { - private static final boolean PERSIST_COMPACTION_MAP = !getBoolean("in-memory-compaction-map"); - - /** Only run if explicitly asked to via -Dtest=SegmentCompactionIT */ - private static final boolean ENABLED = - SegmentCompactionIT.class.getSimpleName().equals(getProperty("test")); - - private static final Logger LOG = LoggerFactory.getLogger(SegmentCompactionIT.class); - - private final MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); - - private final Random rnd = new Random(); - private final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(50); - private final ListeningScheduledExecutorService scheduler = listeningDecorator(executor); - private final FileStoreGCMonitor fileStoreGCMonitor = new FileStoreGCMonitor(Clock.SIMPLE); - private final TestGCMonitor gcMonitor = new TestGCMonitor(fileStoreGCMonitor); - private final Set> writers = newConcurrentHashSet(); - private final Set> readers = newConcurrentHashSet(); - private final Set> references = newConcurrentHashSet(); - private final SegmentCompactionITMBean segmentCompactionMBean = new SegmentCompactionITMBean(); - - private FileStore fileStore; - private SegmentNodeStore nodeStore; - private CompactionStrategy compactionStrategy; - private Registration mBeanRegistration; - - private volatile ListenableFuture compactor = immediateCancelledFuture(); - private volatile ReadWriteLock compactionLock = null; - private volatile int lockWaitTime = 60; - private volatile int maxReaders = 10; - private volatile int maxWriters = 10; - private volatile long maxStoreSize = 200000000000L; - private volatile int maxBlobSize = 1000000; - private volatile int maxStringSize = 10000; - private volatile int maxReferences = 10; - private volatile int maxWriteOps = 10000; - private volatile int maxNodeCount = 1000; - private volatile int maxPropertyCount = 1000; - private volatile int nodeRemoveRatio = 10; - private volatile int propertyRemoveRatio = 10; - private volatile int nodeAddRatio = 40; - private volatile int addStringRatio = 20; - private volatile int addBinaryRatio = 20; - private volatile int compactionInterval = 1; - private volatile boolean stopping; - private volatile Reference rootReference; - private volatile long fileStoreSize; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - public synchronized void stop() { - stopping = true; - notifyAll(); - } - - public void addReaders(int count) { - for (int c = 0; c < count; c++) { - scheduleReader(); - } - } - - public void removeReaders(int count) { - remove(readers, count); - } - - public void addWriters(int count) { - for (int c = 0; c < count; c++) { - scheduleWriter(); - } - } - - public void removeWriters(int count) { - remove(writers, count); - } - - public void removeReferences(int count) { - remove(references, count); - } - - private static void remove(Set> ops, int count) { - Iterator> it = ops.iterator(); - while (it.hasNext() && count-- > 0) { - it.next().cancel(false); - } - } - - private Registration registerMBean(Object mBean, final ObjectName objectName) - throws NotCompliantMBeanException, InstanceAlreadyExistsException, - MBeanRegistrationException { - mBeanServer.registerMBean(mBean, objectName); - return new Registration(){ - @Override - public void unregister() { - try { - mBeanServer.unregisterMBean(objectName); - } catch (Exception e) { - LOG.error("Error unregistering Segment Compaction MBean", e); - } - } - }; - } - - @Before - public void 
setUp() throws Exception { - assumeTrue(ENABLED); - - scheduler.scheduleAtFixedRate(new Runnable() { - @Override - public void run() { - fileStoreGCMonitor.run(); - } - }, 1, 1, SECONDS); - - fileStore = FileStore.builder(folder.getRoot()) - .withMemoryMapping(true) - .withGCMonitor(gcMonitor) - .build(); - SegmentNodeStore.SegmentNodeStoreBuilder nodeStoreBuilder = SegmentNodeStore - .builder(fileStore); - nodeStoreBuilder.withCompactionStrategy(false, false, - CLEAN_OLD.toString(), CompactionStrategy.TIMESTAMP_DEFAULT, - CompactionStrategy.MEMORY_THRESHOLD_DEFAULT, lockWaitTime, - CompactionStrategy.RETRY_COUNT_DEFAULT, - CompactionStrategy.FORCE_AFTER_FAIL_DEFAULT, - PERSIST_COMPACTION_MAP, - CompactionStrategy.GAIN_THRESHOLD_DEFAULT); - nodeStore = nodeStoreBuilder.build(); - - compactionStrategy = nodeStoreBuilder - .getCompactionStrategy(); - fileStore.setCompactionStrategy(compactionStrategy); - - CacheStats segmentCacheStats = fileStore.getTracker().getSegmentCacheStats(); - CacheStats stringCacheStats = fileStore.getTracker().getStringCacheStats(); - List registrations = newArrayList(); - registrations.add(registerMBean(segmentCompactionMBean, - new ObjectName("IT:TYPE=Segment Compaction"))); - registrations.add(registerMBean(new DefaultCompactionStrategyMBean(compactionStrategy), - new ObjectName("IT:TYPE=Compaction Strategy"))); - registrations.add(registerMBean(fileStoreGCMonitor, - new ObjectName("IT:TYPE=GC Monitor"))); - registrations.add(registerMBean(segmentCacheStats, - new ObjectName("IT:TYPE=" + segmentCacheStats.getName()))); - if (stringCacheStats != null) { - registrations.add(registerMBean(stringCacheStats, - new ObjectName("IT:TYPE=" + stringCacheStats.getName()))); - } - mBeanRegistration = new CompositeRegistration(registrations); - } - - @After - public void tearDown() { - if (mBeanRegistration != null) { - mBeanRegistration.unregister(); - } - remove(writers, MAX_VALUE); - remove(readers, MAX_VALUE); - remove(references, MAX_VALUE); - scheduler.shutdown(); - if (fileStore != null) { - fileStore.close(); - } - } - - @Test - public void run() throws InterruptedException { - scheduleSizeMonitor(); - scheduleCompactor(); - addReaders(maxReaders); - addWriters(maxWriters); - - synchronized (this) { - while (!stopping) { - wait(); - } - } - } - - private void scheduleSizeMonitor() { - scheduler.scheduleAtFixedRate(new Runnable() { - @Override - public void run() { - fileStoreSize = fileStore.size(); - } - }, 1, 1, MINUTES); - } - - private synchronized void scheduleCompactor() { - LOG.info("Scheduling compaction after {} minutes", compactionInterval); - compactor.cancel(false); - compactor = scheduler.schedule((new Compactor(fileStore, gcMonitor)), compactionInterval, MINUTES); - addCallback(compactor, new FutureCallback() { - @Override - public void onSuccess(Object result) { - scheduleCompactor(); - } - - @Override - public void onFailure(Throwable t) { - segmentCompactionMBean.error("Compactor error", t); - } - }); - } - - private void scheduleWriter() { - if (writers.size() < maxWriters) { - final RandomWriter writer = new RandomWriter(rnd, nodeStore, rnd.nextInt(maxWriteOps), "W" + rnd.nextInt(5)); - final ListenableScheduledFuture futureWriter = scheduler.schedule( - writer, rnd.nextInt(30), SECONDS); - writers.add(futureWriter); - addCallback(futureWriter, new FutureCallback() { - @Override - public void onSuccess(Void result) { - writers.remove(futureWriter); - if (!futureWriter.isCancelled()) { - scheduleWriter(); - } - } - - @Override - public void 
onFailure(Throwable t) { - writer.cancel(); - writers.remove(futureWriter); - segmentCompactionMBean.error("Writer error", t); - } - }); - } - } - - private void scheduleReader() { - if (readers.size() < maxReaders) { - final RandomReader reader = rnd.nextBoolean() - ? new RandomNodeReader(rnd, nodeStore) - : new RandomPropertyReader(rnd, nodeStore); - final ListenableScheduledFuture futureReader = scheduler.schedule( - reader, rnd.nextInt(30), SECONDS); - readers.add(futureReader); - addCallback(futureReader, new FutureCallback() { - @Override - public void onSuccess(Object node) { - readers.remove(futureReader); - if (!futureReader.isCancelled()) { - if (rnd.nextBoolean()) { - scheduleReference(node); - } else { - scheduleReader(); - } - } - } - - @Override - public void onFailure(Throwable t) { - reader.cancel(); - readers.remove(futureReader); - segmentCompactionMBean.error("Node reader error", t); - } - }); - } - } - - private void scheduleReference(Object object) { - if (references.size() < maxReferences) { - final Reference reference = new Reference(object); - final ListenableScheduledFuture futureReference = scheduler.schedule( - reference, rnd.nextInt(600), SECONDS); - references.add(futureReference); - addCallback(futureReference, new FutureCallback() { - @Override - public void onSuccess(Object result) { - references.remove(reference); - if (!futureReference.isCancelled()) { - scheduleReader(); - } - } - - @Override - public void onFailure(Throwable t) { - reference.run(); - references.remove(reference); - segmentCompactionMBean.error("Reference error", t); - } - }); - } else { - scheduleReader(); - } - } - - private class RandomWriter implements Callable { - private final Random rnd; - private final NodeStore nodeStore; - private final int opCount; - private final String itemPrefix; - - private volatile boolean cancelled; - - RandomWriter(Random rnd, NodeStore nodeStore, int opCount, String itemPrefix) { - this.rnd = rnd; - this.nodeStore = nodeStore; - this.opCount = opCount; - this.itemPrefix = itemPrefix; - } - - - public void cancel() { - cancelled = true; - } - - private T run(Callable thunk) throws Exception { - ReadWriteLock lock = compactionLock; - if (lock != null) { - lock.readLock().lock(); - try { - return thunk.call(); - } finally { - lock.readLock().unlock(); - } - } else { - return thunk.call(); - } - } - - @Override - public Void call() throws Exception { - return run(new Callable() { - @Override - public Void call() throws Exception { - NodeBuilder root = nodeStore.getRoot().builder(); - for (int k = 0; k < opCount && !cancelled; k++) { - modify(nodeStore, root); - } - if (!cancelled) { - try { - CommitHook commitHook = rnd.nextBoolean() - ? 
new CompositeHook(new ConflictHook(DefaultConflictHandler.OURS)) - : new CompositeHook(new ConflictHook(DefaultConflictHandler.THEIRS)); - nodeStore.merge(root, commitHook, CommitInfo.EMPTY); - segmentCompactionMBean.committed(); - } catch (CommitFailedException e) { - LOG.warn("Commit failed: {}", e.getMessage()); - } - } - return null; - } - }); - } - - private void modify(NodeStore nodeStore, NodeBuilder nodeBuilder) throws IOException { - int p0 = nodeRemoveRatio; - int p1 = p0 + propertyRemoveRatio; - int p2 = p1 + nodeAddRatio; - int p3 = p2 + addStringRatio; - double p = p3 + addBinaryRatio; - - boolean deleteOnly = fileStoreSize > maxStoreSize; - double k = rnd.nextDouble(); - if (k < p0/p) { - chooseRandomNode(nodeBuilder).remove(); - } else if (k < p1/p) { - removeRandomProperty(chooseRandomNode(nodeBuilder)); - } else if (k < p2/p && !deleteOnly) { - addRandomNode(nodeBuilder); - } else if (k < p3/p && !deleteOnly) { - addRandomValue(nodeBuilder); - } else if (!deleteOnly) { - addRandomBlob(nodeStore, nodeBuilder); - } - } - - private NodeBuilder chooseRandomNode(NodeBuilder nodeBuilder) { - NodeBuilder childBuilder = nodeBuilder; - for (int k = 0; k < rnd.nextInt(1000); k++) { - childBuilder = randomStep(nodeBuilder, nodeBuilder = childBuilder); - } - return childBuilder; - } - - private NodeBuilder chooseRandomNode(NodeBuilder nodeBuilder, Predicate predicate) { - NodeBuilder childBuilder = chooseRandomNode(nodeBuilder); - while (!predicate.apply(childBuilder)) { - childBuilder = randomStep(nodeBuilder, nodeBuilder = childBuilder); - } - return childBuilder; - } - - private NodeBuilder randomStep(NodeBuilder parent, NodeBuilder node) { - int count = (int) node.getChildNodeCount(Long.MAX_VALUE); - int k = rnd.nextInt(count + 1); - if (k == 0) { - return parent; - } else { - String name = get(node.getChildNodeNames(), k - 1); - return node.getChildNode(name); - } - } - - private void removeRandomProperty(NodeBuilder nodeBuilder) { - int count = (int) nodeBuilder.getPropertyCount(); - if (count > 0) { - PropertyState property = get(nodeBuilder.getProperties(), rnd.nextInt(count)); - nodeBuilder.removeProperty(property.getName()); - } - } - - private void addRandomNode(NodeBuilder nodeBuilder) { - chooseRandomNode(nodeBuilder, new Predicate() { - @Override - public boolean apply(NodeBuilder builder) { - return builder.getChildNodeCount(maxNodeCount) < maxNodeCount; - } - }).setChildNode('N' + itemPrefix + rnd.nextInt(maxNodeCount)); - } - - private void addRandomValue(NodeBuilder nodeBuilder) { - chooseRandomNode(nodeBuilder, new Predicate() { - @Override - public boolean apply(NodeBuilder builder) { - return builder.getPropertyCount() < maxPropertyCount; - } - }) - .setProperty('P' + itemPrefix + rnd.nextInt(maxPropertyCount), - randomAlphabetic(rnd.nextInt(maxStringSize))); - } - - private void addRandomBlob(NodeStore nodeStore, NodeBuilder nodeBuilder) throws IOException { - chooseRandomNode(nodeBuilder, new Predicate() { - @Override - public boolean apply(NodeBuilder builder) { - return builder.getPropertyCount() < maxPropertyCount; - } - }) - .setProperty('B' + itemPrefix + rnd.nextInt(maxPropertyCount), - createBlob(nodeStore, rnd.nextInt(maxBlobSize))); - } - - private Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - } - - private abstract static class RandomReader implements Callable { - protected final Random rnd; - 
protected final NodeStore nodeStore; - - protected volatile boolean cancelled; - - RandomReader(Random rnd, NodeStore nodeStore) { - this.rnd = rnd; - this.nodeStore = nodeStore; - } - - public void cancel() { - cancelled = true; - } - - private NodeState randomStep(NodeState parent, NodeState node) { - int count = (int) node.getChildNodeCount(Long.MAX_VALUE); - int k = rnd.nextInt(count + 1); - if (k == 0) { - return parent; - } else { - String name = get(node.getChildNodeNames(), k - 1); - return node.getChildNode(name); - } - } - - protected final NodeState chooseRandomNode(NodeState parent) { - NodeState child = parent; - for (int k = 0; k < rnd.nextInt(1000) && !cancelled; k++) { - child = randomStep(parent, parent = child); - } - return child; - } - - protected final PropertyState chooseRandomProperty(NodeState node) { - int count = (int) node.getPropertyCount(); - if (count > 0) { - return get(node.getProperties(), rnd.nextInt(count)); - } else { - return null; - } - } - } - - private static class RandomNodeReader extends RandomReader { - RandomNodeReader(Random rnd, NodeStore nodeStore) { - super(rnd, nodeStore); - } - - @Override - public NodeState call() throws Exception { - return chooseRandomNode(nodeStore.getRoot()); - } - } - - private static class RandomPropertyReader extends RandomReader { - RandomPropertyReader(Random rnd, NodeStore nodeStore) { - super(rnd, nodeStore); - } - - @Override - public PropertyState call() throws Exception { - return chooseRandomProperty(chooseRandomNode(nodeStore.getRoot())); - } - } - - private static class Reference implements Runnable { - private volatile Object referent; - - Reference(Object referent) { - this.referent = referent; - } - - @Override - public void run() { - referent = null; - } - } - - private class Compactor implements Runnable { - private final FileStore fileStore; - private final TestGCMonitor gcMonitor; - - Compactor(FileStore fileStore, TestGCMonitor gcMonitor) { - this.fileStore = fileStore; - this.gcMonitor = gcMonitor; - } - - private T run(Callable thunk) throws Exception { - ReadWriteLock lock = compactionLock; - if (lock != null) { - lock.writeLock().lock(); - try { - return thunk.call(); - } finally { - lock.writeLock().unlock(); - } - } else { - return thunk.call(); - } - } - - @Override - public void run() { - if (gcMonitor.isCleaned()) { - LOG.info("Running compaction"); - try { - run(new Callable() { - @Override - public Void call() throws Exception { - gcMonitor.resetCleaned(); - fileStore.maybeCompact(true); - return null; - } - }); - } catch (Exception e) { - LOG.error("Error while running compaction", e); - } - } else { - LOG.info("Not running compaction as no cleanup has taken place"); - } - } - } - - private static class TestGCMonitor implements GCMonitor { - private final GCMonitor delegate; - private volatile boolean cleaned = true; - private volatile long lastCompacted; - - TestGCMonitor(GCMonitor delegate) { - this.delegate = delegate; - } - - @Override - public void info(String message, Object... arguments) { - System.out.println(arrayFormat(message, arguments).getMessage()); - delegate.info(message, arguments); - } - - @Override - public void warn(String message, Object... 
arguments) { - System.out.println(arrayFormat(message, arguments).getMessage()); - delegate.warn(message, arguments); - } - - @Override - public void error(String message, Exception exception) { - System.out.println(format(message, exception).getMessage()); - delegate.error(message, exception); - } - - @Override - public void skipped(String reason, Object... arguments) { - cleaned = true; - System.out.println(arrayFormat(reason, arguments).getMessage()); - delegate.skipped(reason, arguments); - } - - @Override - public void compacted() { - delegate.compacted(); - lastCompacted = System.currentTimeMillis(); - } - - @Override - public void cleaned(long reclaimedSize, long currentSize) { - cleaned = true; - delegate.cleaned(reclaimedSize, currentSize); - } - - @Override - public void updateStatus(String status) { - delegate.updateStatus(status); - } - - public boolean isCleaned() { - return cleaned; - } - - public void resetCleaned() { - cleaned = false; - } - - public long getLastCompacted() { - return lastCompacted; - } - - } - - private class SegmentCompactionITMBean extends AnnotatedStandardMBean implements SegmentCompactionMBean { - private final AtomicLong commitCount = new AtomicLong(); - - private String lastError; - - SegmentCompactionITMBean() { - super(SegmentCompactionMBean.class); - } - - @Override - public void stop() { - SegmentCompactionIT.this.stop(); - } - - @Override - public void setCorePoolSize(int corePoolSize) { - executor.setCorePoolSize(corePoolSize); - } - - @Override - public int getCorePoolSize() { - return executor.getCorePoolSize(); - } - - @Override - public void setCompactionInterval(int minutes) { - if (compactionInterval != minutes) { - compactionInterval = minutes; - scheduleCompactor(); - } - } - - @Override - public int getCompactionInterval() { - return compactionInterval; - } - - @Override - public String getLastCompaction() { - return valueOf(new Date(gcMonitor.getLastCompacted())); - } - - @Override - public void setUseCompactionLock(boolean value) { - if (value && compactionLock == null) { - compactionLock = new ReentrantReadWriteLock(); - } else { - compactionLock = null; - } - } - - @Override - public boolean getUseCompactionLock() { - return compactionLock != null; - } - - @Override - public void setLockWaitTime(int seconds) { - lockWaitTime = seconds; - } - - @Override - public int getLockWaitTime() { - return lockWaitTime; - } - - @Override - public void setMaxReaders(int count) { - checkArgument(count >= 0); - maxReaders = count; - if (count > readers.size()) { - addReaders(count - readers.size()); - } else { - removeReaders(readers.size() - count); - } - } - - @Override - public int getMaxReaders() { - return maxReaders; - } - - @Override - public void setMaxWriters(int count) { - checkArgument(count >= 0); - maxWriters = count; - if (count > writers.size()) { - addWriters(count - writers.size()); - } else { - removeWriters(writers.size() - count); - } - } - - @Override - public int getMaxWriters() { - return maxWriters; - } - - @Override - public void setMaxStoreSize(long size) { - maxStoreSize = size; - } - - @Override - public long getMaxStoreSize() { - return maxStoreSize; - } - - @Override - public void setMaxStringSize(int size) { - maxStringSize = size; - } - - @Override - public int getMaxStringSize() { - return maxStringSize; - } - - @Override - public void setMaxBlobSize(int size) { - maxBlobSize = size; - } - - @Override - public int getMaxBlobSize() { - return maxBlobSize; - } - - @Override - public void setMaxReferences(int count) 
{ - checkArgument(count >= 0); - maxReferences = count; - if (count < references.size()) { - removeReferences(references.size() - count); - } - } - - @Override - public int getMaxReferences() { - return maxReferences; - } - - @Override - public void setMaxWriteOps(int count) { - checkArgument(count >= 0); - maxWriteOps = count; - } - - @Override - public int getMaxWriteOps() { - return maxWriteOps; - } - - @Override - public void setMaxNodeCount(int count) { - checkArgument(count >= 0); - maxNodeCount = count; - } - - @Override - public int getMaxNodeCount() { - return maxNodeCount; - } - - @Override - public void setMaxPropertyCount(int count) { - checkArgument(count >= 0); - maxPropertyCount = count; - } - - @Override - public int getMaxPropertyCount() { - return maxPropertyCount; - } - - @Override - public void setNodeRemoveRatio(int ratio) { - nodeRemoveRatio = ratio; - } - - @Override - public int getNodeRemoveRatio() { - return nodeRemoveRatio; - } - - @Override - public void setPropertyRemoveRatio(int ratio) { - propertyRemoveRatio = ratio; - } - - @Override - public int getPropertyRemoveRatio() { - return propertyRemoveRatio; - } - - @Override - public void setNodeAddRatio(int ratio) { - nodeAddRatio = ratio; - } - - @Override - public int getNodeAddRatio() { - return nodeAddRatio; - } - - @Override - public void setAddStringRatio(int ratio) { - addStringRatio = ratio; - } - - @Override - public int getAddStringRatio() { - return addStringRatio; - } - - @Override - public void setAddBinaryRatio(int ratio) { - addBinaryRatio = ratio; - } - - @Override - public int getAddBinaryRatio() { - return addBinaryRatio; - } - - @Override - public void setRootReference(boolean set) { - if (set && rootReference == null) { - rootReference = new Reference(nodeStore.getRoot()); - } else { - rootReference = null; - } - } - - @Override - public boolean getRootReference() { - return rootReference != null; - } - - @Override - public boolean getPersistCompactionMap() { - return compactionStrategy.getPersistCompactionMap(); - } - - @Override - public int getReaderCount() { - return readers.size(); - } - - @Override - public int getWriterCount() { - return writers.size(); - } - - @Override - public int getReferenceCount() { - return references.size(); - } - - @Override - public long getFileStoreSize() { - return fileStoreSize; - } - - private CompactionMap getCompactionMap() { - return fileStore.getTracker().getCompactionMap(); - } - - @Override - public long getCompactionMapWeight() { - return sum(getCompactionMap().getEstimatedWeights()); - } - - @Override - public long getSegmentCount() { - return sum(getCompactionMap().getSegmentCounts()); - } - - @Override - public long getRecordCount() { - return sum(getCompactionMap().getRecordCounts()); - } - - @Override - public int getCompactionMapDepth() { - return getCompactionMap().getDepth(); - } - - @Override - public String getLastError() { - return lastError; - } - - @Override - public long getCommitCount() { - return commitCount.get(); - } - - void error(String message, Throwable t) { - if (!(t instanceof CancellationException)) { - StringWriter sw = new StringWriter(); - sw.write(message + ": "); - t.printStackTrace(new PrintWriter(sw)); - lastError = sw.toString(); - - LOG.error(message, t); - } - } - - void committed() { - commitCount.incrementAndGet(); - } - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionMBean.java 
oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionMBean.java deleted file mode 100644 index 1334410..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentCompactionMBean.java +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -/** - * MBean for monitoring and interacting with the {@link SegmentCompactionIT} - * longevity test. - */ -public interface SegmentCompactionMBean { - - /** - * Stop the test. - */ - void stop(); - - /** - * Set the core pool size of the scheduler used to execute concurrent - * operations. - * @param corePoolSize - */ - void setCorePoolSize(int corePoolSize); - - /** - * @return the core pool size of the scheduler used to execute concurrent - * operations. - */ - int getCorePoolSize(); - - /** - * Set the compaction interval - * @param minutes number of minutes to wait between compaction cycles. - */ - void setCompactionInterval(int minutes); - - /** - * @return the compaction interval in minutes. - */ - int getCompactionInterval(); - - /** - * @return Time stamp from when compaction last ran. - */ - String getLastCompaction(); - - /** - * Determine whether compaction should run exclusively wrt. concurrent writers. - * @param value run compaction exclusively iff {@code true} - */ - void setUseCompactionLock(boolean value); - - /** - * @return Compaction runs exclusively wrt. concurrent writers iff {@code true} - */ - boolean getUseCompactionLock(); - - /** - * Time to wait for the commit lock for committing the compacted head. - * @param seconds number of seconds to wait - * @see SegmentNodeStore#locked(java.util.concurrent.Callable, long, java.util.concurrent.TimeUnit) - */ - void setLockWaitTime(int seconds); - - /** - * Time to wait for the commit lock for committing the compacted head. - * @return number of seconds - * @see SegmentNodeStore#locked(java.util.concurrent.Callable, long, java.util.concurrent.TimeUnit) - */ - int getLockWaitTime(); - - /** - * Set the maximal number of concurrent readers - * @param count - */ - void setMaxReaders(int count); - - /** - * @return maximal number of concurrent readers - */ - int getMaxReaders(); - - /** - * Set the maximal number of concurrent writers - * @param count - */ - void setMaxWriters(int count); - - /** - * @return maximal number of concurrent writers - */ - int getMaxWriters(); - - /** - * Set the maximal size of the store - * @param size size in bytes - */ - void setMaxStoreSize(long size); - - /** - * @return maximal size of the store in bytes - */ - long getMaxStoreSize(); - - /** - * Set the maximal size of string properties - * @param size size in bytes - */ - void setMaxStringSize(int size); - - /** - * @return maximal size of string properties in bytes - */ - int getMaxStringSize(); - - /** - * Set the maximal size of binary properties - * @param size size in bytes - */ - void setMaxBlobSize(int size); - - /** - * @return maximal size of binary properties in bytes - */ - int getMaxBlobSize(); - - /** - * Set the maximal number of held references - * @param count maximal number of references - */ - void setMaxReferences(int count); - - /** - * @return maximal number of held references - */ - int getMaxReferences(); - - /** - * Maximal number of write operations per scheduled writer - * @param count maximal number of operations - */ - void setMaxWriteOps(int count); - - /** - * @return maximal number of operations - */ - int getMaxWriteOps(); - - /** - * Set the maximal number of child nodes of a node - * @param count maximal number of child nodes - */ - void setMaxNodeCount(int count); - - /** - * @return Maximal number of child nodes of a node - */ - int getMaxNodeCount(); - - /** - * Set the maximal number of properties of a node - * @param count maximal number of properties - */ - void setMaxPropertyCount(int count); - - /** - * @return Maximal number of properties of a node - */ - int getMaxPropertyCount(); - - /** - * Set the ratio of remove node operations wrt. all other operations. - * @param ratio ratio of node remove operations - */ - void setNodeRemoveRatio(int ratio); - - /** - * @return Ratio of node remove operations - */ - int getNodeRemoveRatio(); - - /** - * Set the ratio of remove property operations wrt. all other operations. - * @param ratio ratio of property remove operations - */ - void setPropertyRemoveRatio(int ratio); - - /** - * @return Ratio of property remove operations - */ - int getPropertyRemoveRatio(); - - /** - * Set the ratio of add node operations wrt. all other operations. - * @param ratio ratio of node add operations - */ - void setNodeAddRatio(int ratio); - - /** - * @return Ratio of node add operations - */ - int getNodeAddRatio(); - - /** - * Set the ratio of add string property operations wrt. all other operations. - * @param ratio ratio of string property add operations - */ - void setAddStringRatio(int ratio); - - /** - * @return Ratio of string property add operations - */ - int getAddStringRatio(); - - /** - * Set the ratio of add binary property operations wrt. all other operations. - * @param ratio ratio of binary property add operations - */ - void setAddBinaryRatio(int ratio); - - /** - * @return Ratio of binary property add operations - */ - int getAddBinaryRatio(); - - /** - * Add a reference to the current root or release a held reference. - * @param set add a reference if {@code true}, otherwise release any held reference - */ - void setRootReference(boolean set); - - /** - * @return {@code true} if currently a root reference is being held. {@code false} otherwise. - */ - boolean getRootReference(); - - /** - * Determine whether the compaction map is persisted or in memory - * @return {@code true} if persisted, {@code false} otherwise - */ - boolean getPersistCompactionMap(); - - /** - * @return actual number of concurrent readers - */ - int getReaderCount(); - - /** - * @return actual number of concurrent writers - */ - int getWriterCount(); - - /** - * @return actual number of held references (not including any root reference) - */ - int getReferenceCount(); - - /** - * @return current size of the {@link org.apache.jackrabbit.oak.plugins.segment.file.FileStore} - */ - long getFileStoreSize(); - - /** - * @return current weight of the compaction map - */ - long getCompactionMapWeight(); - - /** - * @return number of records referenced by the keys in this map. - */ - long getRecordCount(); - - /** - * @return number of segments referenced by the keys in this map. - */ - long getSegmentCount(); - - /** - * @return current depth of the compaction map - */ - int getCompactionMapDepth(); - - /** - * @return last error - */ - String getLastError(); - - /** - * @return Number of commits - */ - long getCommitCount(); -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java deleted file mode 100644 index f118537..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java +++ /dev/null @@ -1,550 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.commons.io.FileUtils.byteCountToDisplaySize; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures; -import static org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Date; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.Executor; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import ch.qos.logback.classic.Level; -import com.google.common.base.Stopwatch; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import com.google.common.io.Closeables; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.filefilter.FileFilterUtils; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.commons.FileIOUtils; -import org.apache.jackrabbit.oak.commons.junit.LogCustomizer; -import org.apache.jackrabbit.oak.plugins.blob.BlobReferenceRetriever; -import org.apache.jackrabbit.oak.plugins.blob.GarbageCollectorFileState; -import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector; -import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; -import org.apache.jackrabbit.oak.plugins.blob.datastore.SharedDataStoreUtils; -import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Tests for SegmentNodeStore DataStore GC - */ -@RunWith(Parameterized.class) -public class SegmentDataStoreBlobGCIT { - private static final Logger log = LoggerFactory.getLogger(SegmentDataStoreBlobGCIT.class); - - @Parameterized.Parameter - public boolean usePersistedMap; - - SegmentNodeStore nodeStore; - FileStore store; - DataStoreBlobStore blobStore; - Date startDate; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @BeforeClass - public static void assumptions() { - 
assumeTrue(getFixtures().contains(SEGMENT_MK)); - } - - @Parameterized.Parameters - public static List fixtures() { - return ImmutableList.of(new Boolean[] {true}, new Boolean[] {false}); - } - - protected DataStoreBlobStore getBlobStore(File rootFolder) throws Exception { - return DataStoreUtils.getBlobStore(rootFolder); - } - - protected SegmentNodeStore getNodeStore(BlobStore blobStore) throws Exception { - if (nodeStore == null) { - FileStore.Builder builder = FileStore.builder(getWorkDir()) - .withBlobStore(blobStore).withMaxFileSize(256) - .withCacheSize(64).withMemoryMapping(false); - store = builder.build(); - CompactionStrategy compactionStrategy = - new CompactionStrategy(false, true, - CompactionStrategy.CleanupType.CLEAN_OLD, 0, CompactionStrategy.MEMORY_THRESHOLD_DEFAULT) { - @Override - public boolean compacted(@Nonnull Callable setHead) throws Exception { - return setHead.call(); - } - }; - compactionStrategy.setPersistCompactionMap(usePersistedMap); - store.setCompactionStrategy(compactionStrategy); - nodeStore = SegmentNodeStore.builder(store).build(); - } - return nodeStore; - } - - private File getWorkDir() { - return folder.getRoot(); - } - - public DataStoreState setUp() throws Exception { - return setUp(10); - } - - public DataStoreState setUp(int count) throws Exception { - if (blobStore == null) { - blobStore = getBlobStore(folder.newFolder()); - } - nodeStore = getNodeStore(blobStore); - startDate = new Date(); - - NodeBuilder a = nodeStore.getRoot().builder(); - - /* Create garbage by creating in-lined blobs (size < 16KB) */ - int number = 4000; - NodeBuilder content = a.child("content"); - for (int i = 0; i < number; i++) { - NodeBuilder c = content.child("x" + i); - for (int j = 0; j < 5; j++) { - c.setProperty("p" + j, nodeStore.createBlob(randomStream(j, 16384))); - } - } - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - final long dataSize = store.size(); - log.info("File store dataSize {}", byteCountToDisplaySize(dataSize)); - - // 2. 
Now remove the nodes to generate garbage - content = a.child("content"); - for (int i = 0; i < 500; i++) { - NodeBuilder c = content.child("x" + i); - for (int j = 0; j < 5; j++) { - c.removeProperty("p" + j); - } - } - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - /* Create and delete nodes with blobs stored in DS*/ - int maxDeleted = 5; - int numBlobs = count; - List processed = Lists.newArrayList(); - Random rand = new Random(); - for (int i = 0; i < maxDeleted; i++) { - int n = rand.nextInt(numBlobs); - if (!processed.contains(n)) { - processed.add(n); - } - } - - DataStoreState state = new DataStoreState(); - for (int i = 0; i < numBlobs; i++) { - SegmentBlob b = (SegmentBlob) nodeStore.createBlob(randomStream(i, 18342)); - Iterator idIter = blobStore.resolveChunks(b.getBlobId()); - while (idIter.hasNext()) { - String chunk = idIter.next(); - state.blobsAdded.add(chunk); - if (!processed.contains(i)) { - state.blobsPresent.add(chunk); - } - } - a.child("c" + i).setProperty("x", b); - } - - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - log.info("Created blobs : {}", state.blobsAdded.size()); - - for (int id : processed) { - delete("c" + id); - } - log.info("Deleted nodes : {}", processed.size()); - - // Sleep a little to make eligible for cleanup - TimeUnit.MILLISECONDS.sleep(5); - store.maybeCompact(false); - store.cleanup(); - - return state; - } - - private HashSet addInlined() throws Exception { - HashSet set = new HashSet(); - NodeBuilder a = nodeStore.getRoot().builder(); - int number = 4; - for (int i = 0; i < number; i++) { - Blob b = nodeStore.createBlob(randomStream(i, 16514)); - a.child("cinline" + i).setProperty("x", b); - } - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return set; - } - - private HashSet addNodeSpecialChars() throws Exception { - List specialCharSets = - Lists.newArrayList("q\\%22afdg\\%22", "a\nbcd", "a\n\rabcd", "012\\efg" ); - HashSet set = new HashSet(); - NodeBuilder a = nodeStore.getRoot().builder(); - for (int i = 0; i < specialCharSets.size(); i++) { - SegmentBlob b = (SegmentBlob) nodeStore.createBlob(randomStream(i, 18432)); - NodeBuilder n = a.child("cspecial"); - n.child(specialCharSets.get(i)).setProperty("x", b); - Iterator idIter = blobStore.resolveChunks(b.getBlobId()); - set.addAll(Lists.newArrayList(idIter)); - } - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return set; - } - - private class DataStoreState { - Set blobsAdded = Sets.newHashSet(); - Set blobsPresent = Sets.newHashSet(); - } - - private void delete(String nodeId) throws CommitFailedException { - NodeBuilder builder = nodeStore.getRoot().builder(); - builder.child(nodeId).remove(); - - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - @Test - public void gc() throws Exception { - DataStoreState state = setUp(); - log.info("{} blobs that should remain after gc : {}", state.blobsPresent.size(), state.blobsPresent); - log.info("{} blobs for nodes which are deleted : {}", state.blobsPresent.size(), state.blobsPresent); - Set existingAfterGC = gcInternal(0); - assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty()); - } - - @Test - public void checkMark() throws Exception { - LogCustomizer customLogs = LogCustomizer - .forLogger(MarkSweepGarbageCollector.class.getName()) - .enable(Level.TRACE) - .filter(Level.TRACE) - .create(); - - DataStoreState state = setUp(5); - log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent); - 
customLogs.starting(); - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - String rootFolder = folder.newFolder().getAbsolutePath(); - MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder); - gcObj.collectGarbage(true); - customLogs.finished(); - - assertBlobReferenceRecords(state.blobsPresent, rootFolder); - } - - @Test - public void noGc() throws Exception { - DataStoreState state = setUp(); - log.info("{} blobs that should remain after gc : {}", state.blobsAdded.size(), state.blobsAdded); - log.info("{} blobs for nodes which are deleted : {}", state.blobsPresent.size(), state.blobsPresent); - Set existingAfterGC = gcInternal(86400); - assertTrue(Sets.symmetricDifference(state.blobsAdded, existingAfterGC).isEmpty()); - } - - @Test - public void gcSpecialChar() throws Exception { - DataStoreState state = setUp(); - Set specialCharNodeBlobs = addNodeSpecialChars(); - state.blobsAdded.addAll(specialCharNodeBlobs); - state.blobsPresent.addAll(specialCharNodeBlobs); - Set existingAfterGC = gcInternal(0); - assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty()); - } - - @Test - public void consistencyCheckInit() throws Exception { - DataStoreState state = setUp(); - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - MarkSweepGarbageCollector gcObj = init(86400, executor); - long candidates = gcObj.checkConsistency(); - assertEquals(1, executor.getTaskCount()); - assertEquals(0, candidates); - } - - @Test - public void consistencyCheckWithGc() throws Exception { - DataStoreState state = setUp(); - Set existingAfterGC = gcInternal(0); - assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty()); - - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - MarkSweepGarbageCollector gcObj = init(86400, executor); - long candidates = gcObj.checkConsistency(); - assertEquals(1, executor.getTaskCount()); - assertEquals(0, candidates); - } - - @Test - public void consistencyCheckWithRenegadeDelete() throws Exception { - DataStoreState state = setUp(); - - // Simulate faulty state by deleting some blobs directly - Random rand = new Random(87); - List existing = Lists.newArrayList(state.blobsPresent); - - long count = blobStore.countDeleteChunks(ImmutableList.of(existing.get(rand.nextInt(existing.size()))), 0); - - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - MarkSweepGarbageCollector gcObj = init(86400, executor); - long candidates = gcObj.checkConsistency(); - assertEquals(1, executor.getTaskCount()); - assertEquals(count, candidates); - } - - @Test - public void gcLongRunningBlobCollection() throws Exception { - DataStoreState state = setUp(); - log.info("{} Blobs added {}", state.blobsAdded.size(), state.blobsAdded); - log.info("{} Blobs should be present {}", state.blobsPresent.size(), state.blobsPresent); - - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - String repoId = null; - if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore); - ((SharedDataStore) store.getBlobStore()).addMetadataRecord( - new ByteArrayInputStream(new byte[0]), - REPOSITORY.getNameFromId(repoId)); - } - TestGarbageCollector gc = new TestGarbageCollector( - new SegmentBlobReferenceRetriever(store.getTracker()), - (GarbageCollectableBlobStore) store.getBlobStore(), executor, folder.newFolder().getAbsolutePath(), 5, 5000, 
repoId); - gc.collectGarbage(false); - Set existingAfterGC = iterate(); - log.info("{} Blobs existing after gc {}", existingAfterGC.size(), existingAfterGC); - - assertTrue(Sets.difference(state.blobsPresent, existingAfterGC).isEmpty()); - assertEquals(gc.additionalBlobs, Sets.symmetricDifference(state.blobsPresent, existingAfterGC)); - } - - @Test - public void gcWithInlined() throws Exception { - blobStore = new DataStoreBlobStore(DataStoreUtils.createFDS(new File(getWorkDir(), "datastore"), 16516)); - DataStoreState state = setUp(); - addInlined(); - log.info("{} blobs that should remain after gc : {}", state.blobsAdded.size(), state.blobsAdded); - log.info("{} blobs for nodes which are deleted : {}", state.blobsPresent.size(), state.blobsPresent); - Set existingAfterGC = gcInternal(0); - assertTrue(Sets.symmetricDifference(state.blobsPresent, existingAfterGC).isEmpty()); - } - - @Test - public void consistencyCheckInlined() throws Exception { - blobStore = new DataStoreBlobStore(DataStoreUtils.createFDS(new File(getWorkDir(), "datastore"), 16516)); - DataStoreState state = setUp(); - addInlined(); - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - MarkSweepGarbageCollector gcObj = init(86400, executor); - long candidates = gcObj.checkConsistency(); - assertEquals(1, executor.getTaskCount()); - assertEquals(0, candidates); - } - - private Set gcInternal(long maxBlobGcInSecs) throws Exception { - ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); - MarkSweepGarbageCollector gc = init(maxBlobGcInSecs, executor); - gc.collectGarbage(false); - - assertEquals(0, executor.getTaskCount()); - Set existingAfterGC = iterate(); - log.info("{} blobs existing after gc : {}", existingAfterGC.size(), existingAfterGC); - return existingAfterGC; - } - - private static void assertBlobReferenceRecords(Set expected, String rootFolder) throws IOException { - // Read the marked files to check whether the paths were logged - File root = new File(rootFolder); - List rootFile = FileFilterUtils.filterList( - FileFilterUtils.prefixFileFilter("gcworkdir-"), - root.listFiles()); - List markedFiles = FileFilterUtils.filterList( - FileFilterUtils.prefixFileFilter("marked-"), - rootFile.get(0).listFiles()); - InputStream is = null; - try { - is = new FileInputStream(markedFiles.get(0)); - Set records = FileIOUtils.readStringsAsSet(is, true); - assertEquals(expected, records); - } finally { - Closeables.close(is, false); - FileUtils.forceDelete(rootFile.get(0)); - } - } - - private MarkSweepGarbageCollector init(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor) - throws Exception { - return init(blobGcMaxAgeInSecs, executor, folder.newFolder().getAbsolutePath()); - } - - private MarkSweepGarbageCollector init(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor, - String root) throws Exception { - String repoId = null; - if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore); - ((SharedDataStore) store.getBlobStore()).addMetadataRecord( - new ByteArrayInputStream(new byte[0]), - REPOSITORY.getNameFromId(repoId)); - } - MarkSweepGarbageCollector gc = - new MarkSweepGarbageCollector(new SegmentBlobReferenceRetriever(store.getTracker()), - (GarbageCollectableBlobStore) store.getBlobStore(), executor, - root, 2048, blobGcMaxAgeInSecs, repoId); - return gc; - } - - protected Set iterate() throws Exception { - Iterator cur = blobStore.getAllChunkIds(0); - - Set existing = Sets.newHashSet(); -
while (cur.hasNext()) { - existing.add(cur.next()); - } - return existing; - } - - @After - public void close() throws Exception { - if (store != null) { - store.close(); - } - } - - static InputStream randomStream(int seed, int size) { - Random r = new Random(seed); - byte[] data = new byte[size]; - r.nextBytes(data); - return new ByteArrayInputStream(data); - } - - /** - * Waits for some time and adds additional blobs after blob references are identified, to simulate a - * long running blob id collection phase. - */ - class TestGarbageCollector extends MarkSweepGarbageCollector { - long maxLastModifiedInterval; - String root; - GarbageCollectableBlobStore blobStore; - Set additionalBlobs; - - public TestGarbageCollector(BlobReferenceRetriever marker, GarbageCollectableBlobStore blobStore, - Executor executor, String root, int batchCount, long maxLastModifiedInterval, - @Nullable String repositoryId) throws IOException { - super(marker, blobStore, executor, root, batchCount, maxLastModifiedInterval, repositoryId); - this.root = root; - this.blobStore = blobStore; - this.maxLastModifiedInterval = maxLastModifiedInterval; - this.additionalBlobs = Sets.newHashSet(); - } - - @Override - protected void markAndSweep(boolean markOnly, boolean forceBlobRetrieve) throws Exception { - boolean threw = true; - GarbageCollectorFileState fs = new GarbageCollectorFileState(root); - try { - Stopwatch sw = Stopwatch.createStarted(); - LOG.info("Starting Test Blob garbage collection"); - - // Sleep a little more than the max interval to get over the interval for valid blobs - Thread.sleep(maxLastModifiedInterval + 100); - LOG.info("Slept {} to make blobs old", maxLastModifiedInterval + 100); - - long markStart = System.currentTimeMillis(); - mark(fs); - LOG.info("Mark finished"); - - additionalBlobs = createAdditional(); - - if (!markOnly) { - Thread.sleep(maxLastModifiedInterval + 100); - LOG.info("Slept {} to make additional blobs old", maxLastModifiedInterval + 100); - - long deleteCount = sweep(fs, markStart, forceBlobRetrieve); - threw = false; - - LOG.info("Blob garbage collection completed in {}. Number of blobs deleted [{}]", sw.toString(), - deleteCount, maxLastModifiedInterval); - } - } finally { - if (!LOG.isTraceEnabled()) { - Closeables.close(fs, threw); - } - } - } - - public HashSet createAdditional() throws Exception { - HashSet blobSet = new HashSet(); - NodeBuilder a = nodeStore.getRoot().builder(); - int number = 5; - for (int i = 0; i < number; i++) { - SegmentBlob b = (SegmentBlob) nodeStore.createBlob(randomStream(100 + i, 16516)); - a.child("cafter" + i).setProperty("x", b); - Iterator idIter = - ((GarbageCollectableBlobStore) blobStore).resolveChunks(b.getBlobId()); - while (idIter.hasNext()) { - String chunk = idIter.next(); - blobSet.add(chunk); - } - } - log.info("{} Additional created {}", blobSet.size(), blobSet); - - nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return blobSet; - } - } -} - diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraphTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraphTest.java deleted file mode 100644 index 62d73c0..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentGraphTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
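Note: the DataStore blob GC test ending above wires MarkSweepGarbageCollector by hand; distilled, its deleted init(...) helper amounts to the sketch below. 'store', 'executor', 'folder' and 'repoId' stand in for fixtures the test set up, and the argument meanings follow the deleted code.

    // Minimal sketch of the deleted init(...) wiring; illustration only, not part of the patch.
    MarkSweepGarbageCollector gc = new MarkSweepGarbageCollector(
            new SegmentBlobReferenceRetriever(store.getTracker()),
            (GarbageCollectableBlobStore) store.getBlobStore(),
            executor,
            folder.newFolder().getAbsolutePath(), // GC working directory
            2048,                                 // sweep batch size
            0,                                    // blobGcMaxAgeInSecs: every blob counts as old
            repoId);                              // null unless the data store is shared
    gc.collectGarbage(false);                     // false = mark and sweep; true = mark only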
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.singleton; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.createRegExpFilter; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.parseSegmentGraph; -import static org.junit.Assert.assertEquals; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; -import java.util.zip.ZipEntry; -import java.util.zip.ZipInputStream; - -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Multiset; -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.oak.plugins.segment.SegmentGraph.Graph; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class SegmentGraphTest { - private final Set segments = newHashSet( - UUID.fromString("5be0c2ea-b6ba-4f80-acad-657a20f920b6"), - UUID.fromString("fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - UUID.fromString("53be3b93-87fa-487f-a2fc-7c17e639c231"), - UUID.fromString("2eae0bc2-d3dd-4ba4-a765-70c38073437d"), - UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741"), - UUID.fromString("38c42dde-5928-4cc3-a483-37185d6971e4") - ); - - private final Map> references = ImmutableMap.>of( - UUID.fromString("5be0c2ea-b6ba-4f80-acad-657a20f920b6"), - newHashSet(UUID.fromString("2eae0bc2-d3dd-4ba4-a765-70c38073437d")), - UUID.fromString("fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - newHashSet(UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741")), - UUID.fromString("2eae0bc2-d3dd-4ba4-a765-70c38073437d"), - newHashSet(UUID.fromString("2fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741")) - ); - - private final Set filteredSegments = newHashSet( - UUID.fromString("fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - UUID.fromString("2eae0bc2-d3dd-4ba4-a765-70c38073437d"), - UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741") - ); - - private final Map> filteredReferences = ImmutableMap.>of( - UUID.fromString("fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - newHashSet(UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741")), - UUID.fromString("2eae0bc2-d3dd-4ba4-a765-70c38073437d"), - newHashSet(UUID.fromString("2fdaca71e-f71e-4f19-abf5-144e8c85f9e3"), - UUID.fromString("ab61b8c9-222c-4119-a73b-5f61c0bc4741")) - ); - - private final Set gcGenerations = newHashSet("0", "1"); - private final Map> 
gcReferences = ImmutableMap.of( - "0", singleton("0"), - "1", singleton("0") - ); - - @Rule - public TemporaryFolder storeFolder = new TemporaryFolder(new File("target")); - - private File getStoreFolder() { - return storeFolder.getRoot(); - } - - @Before - public void setup() throws IOException { - System.out.println(getStoreFolder()); - unzip(SegmentGraphTest.class.getResourceAsStream("file-store.zip"), getStoreFolder()); - } - - @Test - public void testSegmentGraph() throws Exception { - ReadOnlyStore store = FileStore.builder(getStoreFolder()).buildReadOnly(); - try { - Graph segmentGraph = parseSegmentGraph(store, Predicates.alwaysTrue()); - assertEquals(segments, newHashSet(segmentGraph.vertices())); - Map> map = newHashMap(); - for (Entry> entry : segmentGraph.edges()) { - map.put(entry.getKey(), entry.getValue().elementSet()); - } - assertEquals(references, map); - } finally { - store.close(); - } - } - - @Test - public void testSegmentGraphWithFilter() throws Exception { - ReadOnlyStore store = FileStore.builder(getStoreFolder()).buildReadOnly(); - try { - Predicate filter = createRegExpFilter(".*testWriter.*", store.getTracker()); - Graph segmentGraph = parseSegmentGraph(store, filter); - assertEquals(filteredSegments, newHashSet(segmentGraph.vertices())); - Map> map = newHashMap(); - for (Entry> entry : segmentGraph.edges()) { - map.put(entry.getKey(), entry.getValue().elementSet()); - } - assertEquals(filteredReferences, map); - } finally { - store.close(); - } - } - - @Test - public void testGCGraph() throws Exception { - ReadOnlyStore store = FileStore.builder(getStoreFolder()).buildReadOnly(); - try { - Graph gcGraph = SegmentGraph.parseGCGraph(store); - assertEquals(gcGenerations, newHashSet(gcGraph.vertices())); - Map> map = newHashMap(); - for (Entry> entry : gcGraph.edges()) { - map.put(entry.getKey(), entry.getValue().elementSet()); - } - assertEquals(gcReferences, map); - } finally { - store.close(); - } - } - - private static void unzip(InputStream is, File target) throws IOException { - ZipInputStream zis = new ZipInputStream(is); - try { - for (ZipEntry entry = zis.getNextEntry(); entry != null; entry = zis.getNextEntry()) { - OutputStream out = new FileOutputStream(new File(target, entry.getName())); - try { - IOUtils.copy(zis, out); - } finally { - out.close(); - } - } - } finally { - zis.close(); - } - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdFactoryTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdFactoryTest.java deleted file mode 100644 index dcd757c..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdFactoryTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
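Note: the unzip helper deleted above predates try-with-resources; an equivalent modern form (a sketch only, same commons-io IOUtils dependency) would be:

    private static void unzip(InputStream is, File target) throws IOException {
        // Behaviour matches the deleted helper; both streams close automatically.
        try (ZipInputStream zis = new ZipInputStream(is)) {
            for (ZipEntry entry = zis.getNextEntry(); entry != null; entry = zis.getNextEntry()) {
                try (OutputStream out = new FileOutputStream(new File(target, entry.getName()))) {
                    IOUtils.copy(zis, out);
                }
            }
        }
    }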
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertTrue; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Set; - -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.junit.Test; - -public class SegmentIdFactoryTest { - - private final SegmentTracker factory; - - public SegmentIdFactoryTest() throws IOException { - factory = new MemoryStore().getTracker(); - } - - @Test - public void segmentIdType() { - assertTrue(factory.newDataSegmentId().isDataSegmentId()); - assertTrue(factory.newBulkSegmentId().isBulkSegmentId()); - - assertFalse(factory.newDataSegmentId().isBulkSegmentId()); - assertFalse(factory.newBulkSegmentId().isDataSegmentId()); - } - - @Test - public void internedSegmentIds() { - assertTrue(factory.getSegmentId(0, 0) == factory.getSegmentId(0, 0)); - assertTrue(factory.getSegmentId(1, 2) == factory.getSegmentId(1, 2)); - assertTrue(factory.getSegmentId(1, 2) != factory.getSegmentId(3, 4)); - } - - @Test - public void referencedSegmentIds() throws InterruptedException { - SegmentId a = factory.newDataSegmentId(); - SegmentId b = factory.newBulkSegmentId(); - SegmentId c = factory.newDataSegmentId(); - - Set ids = factory.getReferencedSegmentIds(); - assertTrue(ids.contains(a)); - assertTrue(ids.contains(b)); - assertTrue(ids.contains(c)); - - // the returned set is a snapshot in time, not continuously updated - assertFalse(ids.contains(factory.newBulkSegmentId())); - } - - /** - * This test can't be enabled in general, as gc() contract is too - * weak for this to work reliably. But it's a good manual check for - * the correct operation of the tracking of segment id references. 
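Note: as the javadoc above explains, System.gc() is only a hint, so a single call cannot be asserted on reliably. The usual mitigation, also used by the SegmentIdTableTest.gc() test removed further below, is a bounded retry; a sketch against this test's own fields:

    // Poll for the effect of gc() instead of asserting after one call (sketch).
    boolean reclaimed = false;
    for (int attempt = 0; attempt < 10 && !reclaimed; attempt++) {
        System.gc();
        reclaimed = !factory.getReferencedSegmentIds().contains(a);
    }
    assertTrue(reclaimed); // still inherently best-effort, hence the test stays disabled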
- */ - // @Test - public void garbageCollection() { - SegmentId a = factory.newDataSegmentId(); - SegmentId b = factory.newBulkSegmentId(); - - // generate lots of garbage copies of an UUID to get the - // garbage collector to reclaim also the original instance - for (int i = 0; i < 1000000; i++) { - a = new SegmentId( - null, a.getMostSignificantBits(), a.getLeastSignificantBits()); - } - System.gc(); - - // now the original UUID should no longer be present - Set ids = factory.getReferencedSegmentIds(); - assertFalse(ids.contains(a)); - assertTrue(ids.contains(b)); - } - - /** - * OAK-2049 - error for data segments - */ - @Test(expected = IllegalStateException.class) - public void dataAIOOBE() { - SegmentId id = factory.newDataSegmentId(); - byte[] buffer = SegmentBufferWriter.createNewBuffer(SegmentVersion.V_11); - ByteBuffer data = ByteBuffer.allocate(Segment.MAX_SEGMENT_SIZE); - data.put(buffer); - data.rewind(); - Segment s = new Segment(factory, id, data); - s.getRefId(1); - } - - /** - * OAK-2049 - error for bulk segments - */ - @Test(expected = IllegalStateException.class) - public void bulkAIOOBE() { - SegmentId id = factory.newBulkSegmentId(); - ByteBuffer data = ByteBuffer.allocate(4); - Segment s = new Segment(factory, id, data); - s.getRefId(1); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableBenchmark.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableBenchmark.java deleted file mode 100644 index 2c62d66..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableBenchmark.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import java.io.IOException; -import java.lang.ref.WeakReference; -import java.util.Random; - -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; - -public class SegmentIdTableBenchmark { - public static void main(String... 
args) throws IOException { - test(); - test(); - test(); - test(); - test(); - test(); - } - - private static void test() throws IOException { - long time; - int repeat = 10000; - int count = 10000; - - long[] array = new long[count]; - Random r = new Random(1); - for (int i = 0; i < array.length; i++) { - array[i] = r.nextLong(); - } - - time = System.currentTimeMillis(); - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - for (int i = 0; i < repeat; i++) { - for (int j = 0; j < count; j++) { - tbl.getSegmentId(j, array[j]); - } - } - time = System.currentTimeMillis() - time; - System.out.println("SegmentIdTable: " + time); - - time = System.currentTimeMillis(); - ConcurrentTable cm = new ConcurrentTable(tracker, 16 * 1024); - for (int i = 0; i < repeat; i++) { - for (int j = 0; j < count; j++) { - cm.getSegmentId(j, array[j]); - } - } - time = System.currentTimeMillis() - time; - System.out.println("ConcurrentTable: " + time); - -// time = System.currentTimeMillis(); -// WeakHashMap map = new WeakHashMap(count); -// for (int i = 0; i < repeat; i++) { -// for (int j = 0; j < count; j++) { -// SegmentId id = new SegmentId(tracker, j, j); -// if (map.get(id) == null) { -// map.put(id, id); -// } -// } -// } -// time = System.currentTimeMillis() - time; -// System.out.println("WeakHashMap: " + time); - } - - static class ConcurrentTable { - private final SegmentTracker tracker; - volatile WeakReference[] map; - @SuppressWarnings("unchecked") - ConcurrentTable(SegmentTracker tracker, int size) { - this.tracker = tracker; - map = (WeakReference[]) new WeakReference[size]; - } - SegmentId getSegmentId(long a, long b) { - outer: - while (true) { - int increment = 1; - WeakReference[] m = map; - int length = m.length; - int index = (int) (b & (length - 1)); - while (true) { - WeakReference ref = m[index]; - if (ref == null) { - SegmentId id = new SegmentId(tracker, a, b); - ref = new WeakReference(id); - m[index] = ref; - if (m != map) { - continue outer; - } - return id; - } - SegmentId id = ref.get(); - if (id != null) { - if (id.getMostSignificantBits() == a && id.getLeastSignificantBits() == b) { - return id; - } - } - // guaranteed to work for power of 2 table sizes, see - // http://stackoverflow.com/questions/2348187/moving-from-linear-probing-to-quadratic-probing-hash-collisons - // http://stackoverflow.com/questions/12121217/limit-for-quadratic-probing-a-hash-table - index = (index + increment) & (length - 1); - increment++; - if (increment > 100) { - System.out.println("inc " + increment); - } - } - } - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableTest.java deleted file mode 100644 index 2f652b8..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentIdTableTest.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
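Note: the probing comments in the benchmark's ConcurrentTable above encode its key invariant. Distilled below; 'slotTaken' is a hypothetical predicate standing in for the collision check.

    // Triangular probing over a power-of-two table: stepping by 1, 2, 3, ...
    // visits every slot before repeating, so the search always terminates.
    int index = (int) (b & (length - 1));
    int increment = 1;
    while (slotTaken(index)) {
        index = (index + increment) & (length - 1);
        increment++;
    }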
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Random; -import java.util.concurrent.Callable; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import javax.annotation.Nonnull; - -import junit.framework.Assert; - -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.junit.Test; - -public class SegmentIdTableTest { - - /** - * OAK-2752 - */ - @Test - public void endlessSearchLoop() throws IOException { - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - - List refs = new ArrayList(); - for (int i = 0; i < 1024; i++) { - refs.add(tbl.getSegmentId(i, i % 64)); - } - - Callable c = new Callable() { - - @Override - public SegmentId call() throws Exception { - // (2,1) doesn't exist - return tbl.getSegmentId(2, 1); - } - }; - Future f = Executors.newSingleThreadExecutor().submit(c); - SegmentId s = null; - try { - s = f.get(5, TimeUnit.SECONDS); - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - Assert.assertNotNull(s); - Assert.assertEquals(2, s.getMostSignificantBits()); - Assert.assertEquals(1, s.getLeastSignificantBits()); - } - - @Test - public void randomized() throws IOException { - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - - List refs = new ArrayList(); - Random r = new Random(1); - for (int i = 0; i < 16 * 1024; i++) { - refs.add(tbl.getSegmentId(r.nextLong(), r.nextLong())); - } - Assert.assertEquals(16 * 1024, tbl.getEntryCount()); - Assert.assertEquals(16 * 2048, tbl.getMapSize()); - Assert.assertEquals(5, tbl.getMapRebuildCount()); - - r = new Random(1); - for (int i = 0; i < 16 * 1024; i++) { - refs.add(tbl.getSegmentId(r.nextLong(), r.nextLong())); - Assert.assertEquals(16 * 1024, tbl.getEntryCount()); - Assert.assertEquals(16 * 2048, tbl.getMapSize()); - Assert.assertEquals(5, tbl.getMapRebuildCount()); - } - } - - @Test - public void clearTable() throws IOException { - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - - List refs = new ArrayList(); - int originalCount = 8; - for (int i = 0; i < originalCount; i++) { - refs.add(tbl.getSegmentId(i, i % 2)); - } - Assert.assertEquals(originalCount, tbl.getEntryCount()); - Assert.assertEquals(0, tbl.getMapRebuildCount()); - - tbl.clearSegmentIdTables(new CompactionStrategy(false, false, - CleanupType.CLEAN_NONE, originalCount, (byte) 0) { - - @Override - public boolean compacted(@Nonnull Callable setHead) - throws Exception { - return true; - } - - @Override - public boolean canRemove(SegmentId id) { - return 
id.getMostSignificantBits() < 4; - } - - }); - - Assert.assertEquals(4, tbl.getEntryCount()); - - for (SegmentId id : refs) { - if (id.getMostSignificantBits() >= 4) { - SegmentId id2 = tbl.getSegmentId( - id.getMostSignificantBits(), - id.getLeastSignificantBits()); - List list = tbl.getRawSegmentIdList(); - if (list.size() != new HashSet(list).size()) { - Collections.sort(list); - fail("duplicate entry " + list.toString()); - } - Assert.assertTrue(id == id2); - } - } - } - - @Test - public void justHashCollisions() throws IOException { - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - - List refs = new ArrayList(); - int originalCount = 1024; - for (int i = 0; i < originalCount; i++) { - // modulo 128 to ensure we have conflicts - refs.add(tbl.getSegmentId(i, i % 128)); - } - Assert.assertEquals(originalCount, tbl.getEntryCount()); - Assert.assertEquals(1, tbl.getMapRebuildCount()); - - List refs2 = new ArrayList(); - tbl.collectReferencedIds(refs2); - Assert.assertEquals(refs.size(), refs2.size()); - - Assert.assertEquals(originalCount, tbl.getEntryCount()); - // we don't expect that there was a refresh, - // because there were just hash collisions - Assert.assertEquals(1, tbl.getMapRebuildCount()); - } - - @Test - public void gc() throws IOException { - SegmentTracker tracker = new MemoryStore().getTracker(); - final SegmentIdTable tbl = new SegmentIdTable(tracker); - - List refs = new ArrayList(); - int originalCount = 1024; - for (int i = 0; i < originalCount; i++) { - // modulo 128 to ensure we have conflicts - refs.add(tbl.getSegmentId(i, i % 128)); - } - Assert.assertEquals(originalCount, tbl.getEntryCount()); - Assert.assertEquals(1, tbl.getMapRebuildCount()); - - for (int i = 0; i < refs.size() / 2; i++) { - // we need to remove the first entries, - // because if we remove the last entries, then - // getSegmentId would not detect that entries were freed up - refs.remove(0); - } - for (int gcCalls = 0;; gcCalls++) { - // needed here, so some entries can be garbage collected - System.gc(); - - for (SegmentId id : refs) { - SegmentId id2 = tbl.getSegmentId(id.getMostSignificantBits(), id.getLeastSignificantBits()); - Assert.assertTrue(id2 == id); - } - // because we found each entry, we expect the refresh count is the same - Assert.assertEquals(1, tbl.getMapRebuildCount()); - - // even though this does not increase the entry count a lot, - // it is supposed to detect that entries were removed, - // and force a refresh, which would get rid of the unreferenced ids - for (int i = 0; i < 10; i++) { - tbl.getSegmentId(i, i); - } - - if (tbl.getEntryCount() < originalCount) { - break; - } else if (gcCalls > 10) { - fail("No entries were garbage collected after 10 times System.gc()"); - } - } - Assert.assertEquals(2, tbl.getMapRebuildCount()); - } -} \ No newline at end of file diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactoryTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactoryTest.java deleted file mode 100644 index 52175c8..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreFactoryTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import org.apache.jackrabbit.oak.spi.state.NodeStoreProvider; -import org.junit.Ignore; -import org.junit.Test; - -import java.util.Map; - -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.sling.testing.mock.osgi.MockOsgi.deactivate; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -public class SegmentNodeStoreFactoryTest extends SegmentNodeStoreServiceTest { - - private SegmentNodeStoreFactory segmentNodeStoreFactory; - - @Test - @Ignore - public void nodeStoreProvider() throws Exception { - } - - @Override - protected void registerSegmentNodeStoreService(boolean customBlobStore) { - Map properties = newHashMap(); - - properties.put(SegmentNodeStoreFactory.ROLE, "some-role"); - properties.put(SegmentNodeStoreFactory.CUSTOM_BLOB_STORE, customBlobStore); - properties.put(SegmentNodeStoreFactory.DIRECTORY, folder.getRoot().getAbsolutePath()); - - segmentNodeStoreFactory = context.registerInjectActivateService(new SegmentNodeStoreFactory(), properties); - } - - @Override - protected void unregisterSegmentNodeStoreService() { - deactivate(segmentNodeStoreFactory); - } - - @Override - protected void assertServiceActivated() { - assertNotNull(context.getService(NodeStoreProvider.class)); - } - - @Override - protected void assertServiceNotActivated() { - assertNull(context.getService(NodeStoreProvider.class)); - } -} \ No newline at end of file diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreServiceTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreServiceTest.java deleted file mode 100644 index a8c8693..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreServiceTest.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
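Note: the factory test removed above reduces to the standard sling-mocks lifecycle; a sketch using the identifiers from the deleted code:

    // Register with activation properties, assert on the published service, deactivate.
    Map<String, Object> props = newHashMap();
    props.put(SegmentNodeStoreFactory.ROLE, "some-role");
    props.put(SegmentNodeStoreFactory.CUSTOM_BLOB_STORE, false);
    props.put(SegmentNodeStoreFactory.DIRECTORY, folder.getRoot().getAbsolutePath());
    SegmentNodeStoreFactory factory =
            context.registerInjectActivateService(new SegmentNodeStoreFactory(), props);
    assertNotNull(context.getService(NodeStoreProvider.class));
    deactivate(factory);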
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.sling.testing.mock.osgi.MockOsgi.deactivate; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.mock; - -import java.io.File; -import java.util.Map; - -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.apache.jackrabbit.oak.spi.state.NodeStoreProvider; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.apache.sling.testing.mock.osgi.junit.OsgiContext; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.osgi.framework.ServiceRegistration; - -public class SegmentNodeStoreServiceTest { - - @Rule - public OsgiContext context = new OsgiContext(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Before - public void setUp(){ - context.registerService(StatisticsProvider.class, StatisticsProvider.NOOP); - } - - /** - * A NodeStore service should be registered when a BlobStore service is not - * available and the "customBlobStore" configuration property is false. - */ - @Test - public void testNoCustomBlobStoreWithoutBlobStore() { - registerSegmentNodeStoreService(false); - assertServiceActivated(); - - unregisterSegmentNodeStoreService(); - } - - /** - * A NodeStore service should be registered when a BlobStore service is - * available but the "customBlobStore" configuration property is false. - */ - @Test - public void testNoCustomBlobStoreWithBlobStore() { - registerBlobStore(); - - registerSegmentNodeStoreService(false); - assertServiceActivated(); - - unregisterSegmentNodeStoreService(); - unregisterBlobStore(); - } - - /** - * A NodeStore service should not be registered when the "customBlobStore" - * configuration property is true but a BlobStore service is not available. - */ - @Test - public void testUseCustomBlobStoreWithoutBlobStore() { - registerSegmentNodeStoreService(true); - assertServiceNotActivated(); - - unregisterSegmentNodeStoreService(); - } - - /** - * A NodeStore service should be registered when the "customBlobStore" - * configuration property is true and a BlobStore service is available. - */ - @Test - public void testUseCustomBlobStoreWithBlobStore() { - registerBlobStore(); - - registerSegmentNodeStoreService(true); - assertServiceActivated(); - - unregisterSegmentNodeStoreService(); - unregisterBlobStore(); - } - - /** - * A NodeStore service should be registered when the "customBlobStore" - * configuration property is true and a BlobStore service becomes - * dynamically available. - */ - @Test - public void testUseCustomBlobStoreWithDynamicBlobStoreActivation() { - registerSegmentNodeStoreService(true); - assertServiceNotActivated(); - - registerBlobStore(); - assertServiceActivated(); - - unregisterSegmentNodeStoreService(); - unregisterBlobStore(); - } - - /** - * A NodeStore service should be unregistered when the "customBlobStore" - * configuration property is true and a BlobStore service becomes - * dynamically unavailable.
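Note: the javadoc'd scenarios above condense to one rule: with customBlobStore=true the NodeStore registration tracks the BlobStore's lifecycle. As a single flow, using the helpers defined in this deleted test:

    registerSegmentNodeStoreService(true);  // customBlobStore = true
    assertServiceNotActivated();            // no BlobStore service yet
    registerBlobStore();
    assertServiceActivated();               // binds once a BlobStore appears
    unregisterBlobStore();
    assertServiceNotActivated();            // unbinds when it goes away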
- */ - @Test - public void testUseCustomBlobStoreWithDynamicBlobStoreDeactivation() { - registerBlobStore(); - - registerSegmentNodeStoreService(true); - assertServiceActivated(); - - unregisterBlobStore(); - assertServiceNotActivated(); - - unregisterSegmentNodeStoreService(); - } - - @Test - public void nodeStoreProvider() throws Exception{ - Map properties = newHashMap(); - properties.put(SegmentNodeStoreService.SECONDARY_STORE, true); - properties.put(SegmentNodeStoreService.DIRECTORY, folder.getRoot().getAbsolutePath()); - context.registerService(BlobStore.class, new MemoryBlobStore()); - - segmentNodeStoreService = context.registerInjectActivateService(new SegmentNodeStoreService(), properties); - assertNull(context.getService(NodeStore.class)); - assertNotNull(context.getService(NodeStoreProvider.class)); - } - - private SegmentNodeStoreService segmentNodeStoreService; - - protected void registerSegmentNodeStoreService(boolean customBlobStore) { - Map properties = newHashMap(); - - properties.put(SegmentNodeStoreService.CUSTOM_BLOB_STORE, customBlobStore); - properties.put(SegmentNodeStoreService.DIRECTORY, folder.getRoot().getAbsolutePath()); - - segmentNodeStoreService = context.registerInjectActivateService(new SegmentNodeStoreService(), properties); - } - - protected void unregisterSegmentNodeStoreService() { - deactivate(segmentNodeStoreService); - } - - private ServiceRegistration blobStore; - - private void registerBlobStore() { - blobStore = context.bundleContext().registerService(BlobStore.class.getName(), mock(BlobStore.class), null); - } - - private void unregisterBlobStore() { - blobStore.unregister(); - } - - protected void assertServiceActivated() { - assertNotNull(context.getService(NodeStore.class)); - assertNotNull(context.getService(SegmentStoreProvider.class)); - } - - protected void assertServiceNotActivated() { - assertNull(context.getService(NodeStore.class)); - assertNull(context.getService(SegmentStoreProvider.class)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowExceptionIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowExceptionIT.java deleted file mode 100644 index 1405048..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentOverflowExceptionIT.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
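Note: had SegmentOverflowExceptionIT (deleted next) been ported rather than dropped, its store setup would move to the oak-segment-tar builders. A sketch under that assumption; the builder names come from oak-segment-tar, not from this patch, and withGCMonitor is assumed to exist on FileStoreBuilder.

    FileStore fileStore = FileStoreBuilder.fileStoreBuilder(getFileStoreFolder())
            .withGCMonitor(gcMonitor)
            .build();
    SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();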
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.commons.io.FileUtils.deleteDirectory; -import static org.apache.commons.lang.RandomStringUtils.randomAlphabetic; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_OLD; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.MEMORY_THRESHOLD_DEFAULT; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.Random; -import java.util.concurrent.Callable; - -import javax.annotation.Nonnull; - -import com.google.common.collect.Iterables; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *
Tests verifying that the repository does not get corrupted: {@code OAK-2662 SegmentOverflowException in HeavyWriteIT on Jenkins} - * - * This test will run for one hour unless it fails, thus it is disabled by default. On the - * command line specify {@code -DSegmentOverflowExceptionIT=true} to enable it. To specify a different - * timeout value {@code t} use {@code -Dtimeout=t} - * - * If you only want to run this test: - * {@code mvn verify -Dsurefire.skip.ut=true -PintegrationTesting -Dit.test=SegmentOverflowExceptionIT -DSegmentOverflowExceptionIT=true}
- */ -public class SegmentOverflowExceptionIT { - private static final Logger LOG = LoggerFactory - .getLogger(SegmentOverflowExceptionIT.class); - private static final boolean ENABLED = Boolean - .getBoolean(SegmentOverflowExceptionIT.class.getSimpleName()); - private static final long TIMEOUT = Long - .getLong("timeout", 60*60*1000); - - private final Random rnd = new Random(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Before - public void setUp() throws IOException { - assumeTrue(ENABLED); - } - - private volatile boolean compact = true; - - private final GCMonitor gcMonitor = new GCMonitor.Empty() { - @Override - public void skipped(String reason, Object... arguments) { - compact = true; - } - - @Override - public void cleaned(long reclaimedSize, long currentSize) { - compact = true; - } - }; - - @Test - public void run() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withGCMonitor(gcMonitor).build(); - try { - final SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - fileStore.setCompactionStrategy(new CompactionStrategy(false, false, CLEAN_OLD, 1000, MEMORY_THRESHOLD_DEFAULT) { - @Override - public boolean compacted(@Nonnull Callable setHead) throws Exception { - return nodeStore.locked(setHead); - } - }); - - long start = System.currentTimeMillis(); - int snfeCount = 0; - while (System.currentTimeMillis() - start < TIMEOUT) { - try { - NodeBuilder root = nodeStore.getRoot().builder(); - while (rnd.nextInt(100) != 0) { - modify(nodeStore, root); - } - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - if (compact) { - compact = false; - fileStore.maybeCompact(true); - } - } catch (SegmentNotFoundException snfe) { - // Usually this can be ignored as SNFEs are somewhat expected here - // due to the small retention value for segments.
- if (snfeCount++ > 100) { - throw snfe; - } - } - } - } finally { - fileStore.close(); - } - } - - private void modify(NodeStore nodeStore, NodeBuilder nodeBuilder) throws IOException { - int k = rnd.nextInt(100); - if (k < 10) { - if (!nodeBuilder.remove()) { - descent(nodeStore, nodeBuilder); - } - } else if (k < 40) { - nodeBuilder.setChildNode("N" + rnd.nextInt(1000)); - } else if (k < 80) { - nodeBuilder.setProperty("P" + rnd.nextInt(1000), randomAlphabetic(rnd.nextInt(10000))); - } else if (k < 90) { - nodeBuilder.setProperty("B" + rnd.nextInt(1000), createBlob(nodeStore, 10000000)); - } else { - descent(nodeStore, nodeBuilder); - } - } - - private void descent(NodeStore nodeStore, NodeBuilder nodeBuilder) throws IOException { - long count = nodeBuilder.getChildNodeCount(Long.MAX_VALUE); - if (count > 0) { - int c = rnd.nextInt((int) count); - String name = Iterables.get(nodeBuilder.getChildNodeNames(), c); - modify(nodeStore, nodeBuilder.getChildNode(name)); - } - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParserTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParserTest.java deleted file mode 100644 index 5195f9e..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentParserTest.java +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
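Note: the IT above tolerates occasional SegmentNotFoundExceptions by design. The pattern, distilled; 'applyRandomWriteLoad' is a hypothetical stand-in for the deleted loop body.

    // Rare SNFEs are expected under aggressive segment cleanup; fail only when
    // the count suggests real corruption (threshold as in the deleted IT).
    int snfeCount = 0;
    while (System.currentTimeMillis() - start < TIMEOUT) {
        try {
            applyRandomWriteLoad();
        } catch (SegmentNotFoundException snfe) {
            if (snfeCount++ > 100) {
                throw snfe;
            }
        }
    }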
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.base.Strings.repeat; -import static com.google.common.collect.Lists.newArrayListWithCapacity; -import static com.google.common.collect.Maps.newHashMap; -import static junitx.framework.ComparableAssert.assertEquals; -import static org.apache.jackrabbit.oak.api.Type.BINARY; -import static org.apache.jackrabbit.oak.api.Type.LONGS; -import static org.apache.jackrabbit.oak.api.Type.NAME; -import static org.apache.jackrabbit.oak.api.Type.NAMES; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MEDIUM_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.SMALL_LIMIT; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentParser.BlobType.LONG; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentParser.BlobType.MEDIUM; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentParser.BlobType.SMALL; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_10; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.plugins.segment.TestUtils.newRecordId; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.withSettings; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentParser.BlobInfo; -import org.apache.jackrabbit.oak.plugins.segment.SegmentParser.ListInfo; -import org.apache.jackrabbit.oak.plugins.segment.SegmentParser.MapInfo; -import org.apache.jackrabbit.oak.plugins.segment.SegmentParser.NodeInfo; -import org.apache.jackrabbit.oak.plugins.segment.SegmentParser.ValueInfo; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) -public class SegmentParserTest { - private final SegmentVersion segmentVersion; - - private SegmentStore store; - private SegmentWriter writer; - - @Parameterized.Parameters - public static List fixtures() { - return ImmutableList.of(new SegmentVersion[] {V_10}, new SegmentVersion[] {V_11}); - } - - public SegmentParserTest(SegmentVersion segmentVersion) { - this.segmentVersion = segmentVersion; - } - - private static class TestParser extends SegmentParser { - private final String name; - - private TestParser(String name) { - this.name = name; - } - - private void throwUOE(String method) { - throw new UnsupportedOperationException(name + " must not call " + method); - } - - @Override - protected void onNode(RecordId parentId, RecordId nodeId) { - throwUOE("onNode"); - } - - @Override - protected void onTemplate(RecordId parentId, RecordId templateId) { - throwUOE("onTemplate"); - } - - @Override - protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { - throwUOE("onMap"); - } - - @Override - protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - 
throwUOE("onMapDiff"); - } - - @Override - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - throwUOE("onMapLeaf"); - } - - @Override - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - throwUOE("onMapBranch"); - } - - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - throwUOE("onProperty"); - } - - @Override - protected void onValue(RecordId parentId, RecordId valueId, Type type) { - throwUOE("onValue"); - } - - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - throwUOE("onBlob"); - } - - @Override - protected void onString(RecordId parentId, RecordId stringId) { - throwUOE("onString"); - } - - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - throwUOE("onList"); - } - - @Override - protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) { - throwUOE("onListBucket"); - } - } - - @Before - public void setup() { - store = mock(SegmentStore.class, withSettings().stubOnly()); - SegmentTracker tracker = new SegmentTracker(store); - when(store.getTracker()).thenReturn(tracker); - writer = new SegmentWriter(store, segmentVersion, ""); - } - - @Test - public void emptyNode() throws IOException { - SegmentNodeState node = writer.writeNode(EMPTY_NODE); - NodeInfo info = new TestParser("emptyNode") { - @Override protected void onTemplate(RecordId parentId, RecordId templateId) { } - }.parseNode(node.getRecordId()); - assertEquals(node.getRecordId(), info.nodeId); - assertEquals(0, info.nodeCount); - assertEquals(0, info.propertyCount); - assertEquals(3, info.size); - } - - @Test - public void singleChildNode() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("child"); - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - NodeInfo info = new TestParser("singleChildNode") { - @Override protected void onNode(RecordId parentId, RecordId nodeId) { } - @Override protected void onTemplate(RecordId parentId, RecordId templateId) { } - }.parseNode(node.getRecordId()); - assertEquals(node.getRecordId(), info.nodeId); - assertEquals(1, info.nodeCount); - assertEquals(0, info.propertyCount); - assertEquals(6, info.size); - } - - @Test - public void node() throws IOException { - final NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("one"); - builder.setChildNode("two"); - builder.setProperty("three", 42); - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - NodeInfo info = new TestParser("node") { - @Override protected void onNode(RecordId parentId, RecordId nodeId) { } - @Override protected void onTemplate(RecordId parentId, RecordId templateId) { } - @Override protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { } - @Override protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { } - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - if (segmentVersion == V_10) { - super.onList(parentId, listId, count); - } - } - }.parseNode(node.getRecordId()); - assertEquals(node.getRecordId(), info.nodeId); - assertEquals(2, info.nodeCount); - assertEquals(1, info.propertyCount); - assertEquals(9, info.size); - } - - @Test - public void template() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setChildNode("n"); - builder.setProperty("p", 1); - builder.setProperty("jcr:primaryType", "type", 
NAME); - builder.setProperty("jcr:mixinTypes", ImmutableList.of("type1", "type2"), NAMES); - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - NodeInfo nodeInfo = new TestParser("template") { - @Override - protected void onTemplate(RecordId parentId, RecordId templateId) { - TemplateInfo info = parseTemplate(templateId); - assertEquals(templateId, info.templateId); - assertTrue(info.hasPrimaryType); - assertTrue(info.hasMixinType); - assertFalse(info.zeroChildNodes); - assertFalse(info.manyChildNodes); - assertEquals(2, info.mixinCount); - assertEquals(1, info.propertyCount); - assertEquals(20, info.size); - } - @Override protected void onString(RecordId parentId, RecordId stringId) { } - @Override protected void onNode(RecordId parentId, RecordId nodeId) { } - @Override protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { } - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - if (segmentVersion == V_10) { - super.onList(parentId, listId, count); - } - } - }.parseNode(node.getRecordId()); - } - - @Test - public void emptyMap() throws IOException { - Map empty = newHashMap(); - MapRecord map = writer.writeMap(null, empty); - MapInfo mapInfo = new TestParser("emptyMap") { - @Override protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { } - }.parseMap(null, map.getRecordId(), map); - assertEquals(map.getRecordId(), mapInfo.mapId); - assertEquals(-1, mapInfo.size); - } - - @Test - public void nonEmptyMap() throws IOException { - Random rnd = new Random(); - MapRecord base = writer.writeMap(null, createMap(33, rnd)); - MapRecord map = writer.writeMap(base, createMap(1, rnd)); - final AtomicInteger size = new AtomicInteger(); - MapInfo mapInfo = new TestParser("nonEmptyMap") { - @Override - protected void onMapDiff(RecordId parentId, RecordId mapId, MapRecord map) { - MapInfo mapInfo = parseMapDiff(mapId, map); - assertEquals(mapId, mapInfo.mapId); - size.addAndGet(mapInfo.size); - } - @Override - protected void onMap(RecordId parentId, RecordId mapId, MapRecord map) { - MapInfo mapInfo = parseMap(parentId, mapId, map); - assertEquals(mapId, mapInfo.mapId); - size.addAndGet(mapInfo.size); - } - @Override - protected void onMapBranch(RecordId parentId, RecordId mapId, MapRecord map) { - MapInfo mapInfo = parseMapBranch(mapId, map); - assertEquals(mapId, mapInfo.mapId); - size.addAndGet(mapInfo.size); - } - @Override - protected void onMapLeaf(RecordId parentId, RecordId mapId, MapRecord map) { - MapInfo mapInfo = parseMapLeaf(mapId, map); - assertEquals(mapId, mapInfo.mapId); - size.addAndGet(mapInfo.size); - } - @Override protected void onString(RecordId parentId, RecordId stringId) { } - }.parseMap(null, map.getRecordId(), map); - assertEquals(map.getRecordId(), mapInfo.mapId); - assertEquals(-1, mapInfo.size); - assertEquals(456, size.get()); - } - - private Map createMap(int size, Random rnd) { - Map map = newHashMap(); - for (int k = 0; k < size; k++) { - map.put("k" + k, newRecordId(store.getTracker(), rnd)); - } - return map; - } - - @Test - public void singleValueProperty() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("p", 1); - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - NodeInfo nodeInfo = new TestParser("singleValueProperty") { - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - PropertyInfo propertyInfo = parseProperty(parentId, propertyId, 
template); - assertEquals(propertyId, propertyInfo.propertyId); - assertEquals(-1, propertyInfo.count); - assertEquals(0, propertyInfo.size); - } - @Override protected void onTemplate(RecordId parentId, RecordId templateId) { } - @Override protected void onValue(RecordId parentId, RecordId valueId, Type type) { } - @Override - protected void onList(RecordId parentId, RecordId listId, int count) { - if (segmentVersion == V_10) { - super.onList(parentId, listId, count); - } - } - }.parseNode(node.getRecordId()); - } - - @Test - public void multiValueProperty() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - builder.setProperty("p", ImmutableList.of(1L, 2L, 3L, 4L), LONGS); - SegmentNodeState node = writer.writeNode(builder.getNodeState()); - NodeInfo nodeInfo = new TestParser("multiValueProperty") { - @Override - protected void onProperty(RecordId parentId, RecordId propertyId, PropertyTemplate template) { - PropertyInfo propertyInfo = parseProperty(parentId, propertyId, template); - assertEquals(propertyId, propertyInfo.propertyId); - assertEquals(4, propertyInfo.count); - assertEquals(7, propertyInfo.size); - } - @Override protected void onTemplate(RecordId parentId, RecordId templateId) { } - @Override protected void onValue(RecordId parentId, RecordId valueId, Type type) { } - @Override protected void onList(RecordId parentId, RecordId listId, int count) { } - }.parseNode(node.getRecordId()); - } - - @Test - public void smallBlob() throws IOException { - SegmentBlob blob = writer.writeBlob(createRandomBlob(4)); - ValueInfo valueInfo = new TestParser("smallBlob") { - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - BlobInfo blobInfo = parseBlob(blobId); - assertEquals(blobId, blobInfo.blobId); - assertEquals(SMALL, blobInfo.blobType); - assertEquals(5, blobInfo.size); - } - }.parseValue(null, blob.getRecordId(), BINARY); - assertEquals(blob.getRecordId(), valueInfo.valueId); - assertEquals(BINARY, valueInfo.type); - } - - @Test - public void mediumBlob() throws IOException { - SegmentBlob blob = writer.writeBlob(createRandomBlob(SMALL_LIMIT)); - ValueInfo valueInfo = new TestParser("mediumBlob") { - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - BlobInfo blobInfo = parseBlob(blobId); - assertEquals(blobId, blobInfo.blobId); - assertEquals(MEDIUM, blobInfo.blobType); - assertEquals(SMALL_LIMIT + 2, blobInfo.size); - } - }.parseValue(null, blob.getRecordId(), BINARY); - assertEquals(blob.getRecordId(), valueInfo.valueId); - assertEquals(BINARY, valueInfo.type); - } - - @Test - public void longBlob() throws IOException { - SegmentBlob blob = writer.writeBlob(createRandomBlob(MEDIUM_LIMIT)); - ValueInfo valueInfo = new TestParser("longBlob") { - @Override - protected void onBlob(RecordId parentId, RecordId blobId) { - BlobInfo blobInfo = parseBlob(blobId); - assertEquals(blobId, blobInfo.blobId); - assertEquals(LONG, blobInfo.blobType); - assertEquals(MEDIUM_LIMIT + 11, blobInfo.size); - } - @Override protected void onList(RecordId parentId, RecordId listId, int count) { } - }.parseValue(null, blob.getRecordId(), BINARY); - assertEquals(blob.getRecordId(), valueInfo.valueId); - assertEquals(BINARY, valueInfo.type); - } - - private static Blob createRandomBlob(int size) { - byte[] bytes = new byte[size]; - new Random().nextBytes(bytes); - return new ArrayBasedBlob(bytes); - } - - @Test - public void shortString() throws IOException { - RecordId stringId = writer.writeString("short"); - BlobInfo blobInfo = new 
TestParser("shortString").parseString(stringId); - assertEquals(stringId, blobInfo.blobId); - assertEquals(SMALL, blobInfo.blobType); - assertEquals(6, blobInfo.size); - } - - @Test - public void mediumString() throws IOException { - RecordId stringId = writer.writeString(repeat("s", SMALL_LIMIT)); - BlobInfo blobInfo = new TestParser("mediumString").parseString(stringId); - assertEquals(stringId, blobInfo.blobId); - assertEquals(MEDIUM, blobInfo.blobType); - assertEquals(SMALL_LIMIT + 2, blobInfo.size); - } - - @Test - public void longString() throws IOException { - RecordId stringId = writer.writeString(repeat("s", MEDIUM_LIMIT)); - BlobInfo blobInfo = new TestParser("longString"){ - @Override protected void onList(RecordId parentId, RecordId listId, int count) { } - }.parseString(stringId); - assertEquals(stringId, blobInfo.blobId); - assertEquals(LONG, blobInfo.blobType); - assertEquals(MEDIUM_LIMIT + 11, blobInfo.size); - } - - @Test - public void emptyList() { - RecordId listId = newRecordId(store.getTracker(), new Random()); - ListInfo listInfo = new TestParser("emptyList").parseList(null, listId, 0); - assertEquals(listId, listInfo.listId); - assertEquals(0, listInfo.count); - assertEquals(0, listInfo.size); - } - - @Test - public void nonEmptyList() throws IOException { - int count = 100000; - Random rnd = new Random(); - List list = newArrayListWithCapacity(count); - for (int k = 0; k < count; k++) { - list.add(newRecordId(store.getTracker(), rnd)); - } - RecordId listId = writer.writeList(list); - ListInfo listInfo = new TestParser("nonEmptyList"){ - @Override - protected void onListBucket(RecordId parentId, RecordId listId, int index, int count, int capacity) { - parseListBucket(listId, index, count, capacity); - } - }.parseList(null, listId, count); - assertEquals(listId, listInfo.listId); - assertEquals(count, listInfo.count); - assertEquals(301185, listInfo.size); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java deleted file mode 100644 index 6830162..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
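The record-parser tests removed above, and the SegmentSizeTest removed below, assert exact byte-level record layouts of the deprecated oak-segment format; oak-segment-tar defines its own layout, so they are dropped rather than ported. For orientation, a minimal sketch of writing equivalent content through the replacement API (the builder calls match those introduced elsewhere in this change; the wrapper class is hypothetical):

    import java.io.IOException;

    import org.apache.jackrabbit.oak.api.CommitFailedException;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.memory.MemoryStore;
    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    public class InMemorySegmentExample {

        public static void main(String[] args) throws IOException, CommitFailedException {
            // In-memory segment store, as used by the migrated fixtures.
            NodeStore store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build();

            // Write a small node; record sizes are an implementation detail of
            // oak-segment-tar rather than something tests pin byte-for-byte.
            NodeBuilder root = store.getRoot().builder();
            root.child("foo").setProperty("p", 1);
            store.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        }
    }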
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static junit.framework.Assert.assertEquals; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; - -import java.io.IOException; -import java.util.Calendar; -import java.util.Collections; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.PropertyStates; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.util.ISO8601; -import org.junit.Test; - - -/** - * Test case for ensuring that segment size remains within bounds. - */ -public class SegmentSizeTest { - - @Test - public void testNodeSize() throws IOException { - NodeBuilder builder = EMPTY_NODE.builder(); - expectSize(96, builder); - expectAmortizedSize(4, builder); - - builder = EMPTY_NODE.builder(); - builder.setProperty("foo", "bar"); - expectSize(112, builder); - expectAmortizedSize(8, builder); - - builder = EMPTY_NODE.builder(); - builder.setProperty("foo", "bar"); - builder.setProperty("baz", 123); - expectSize(128, builder); - expectAmortizedSize(16, builder); - - builder = EMPTY_NODE.builder(); - builder.child("foo"); - expectSize(128, builder); - expectAmortizedSize(12, builder); - - builder = EMPTY_NODE.builder(); - builder.child("foo"); - builder.child("bar"); - expectSize(144, builder); - expectAmortizedSize(40, builder); - } - - @Test - public void testDuplicateStrings() throws IOException { - String string = "More than just a few bytes of example content."; - - SegmentWriter writer = new MemoryStore().getTracker().getWriter(); - SegmentNodeBuilder builder = writer.writeNode(EMPTY_NODE).builder(); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(1, string), Type.STRINGS)); - RecordId id1 = builder.getNodeState().getRecordId(); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(12, string), Type.STRINGS)); - RecordId id2 = builder.getNodeState().getRecordId(); - assertEquals(16 + 12 * Segment.RECORD_ID_BYTES, - id1.getOffset() - id2.getOffset()); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(100, string), Type.STRINGS)); - RecordId id3 = builder.getNodeState().getRecordId(); - assertEquals(16 + 100 * Segment.RECORD_ID_BYTES, - id2.getOffset() - id3.getOffset()); - } - - @Test - public void testDuplicateDates() throws IOException { - String now = ISO8601.format(Calendar.getInstance()); - - SegmentWriter writer = new MemoryStore().getTracker().getWriter(); - SegmentNodeBuilder builder = writer.writeNode(EMPTY_NODE).builder(); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(1, now), Type.DATES)); - RecordId id1 = builder.getNodeState().getRecordId(); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(12, now), Type.DATES)); - RecordId id2 = builder.getNodeState().getRecordId(); - assertEquals(16 + 12 * Segment.RECORD_ID_BYTES, - id1.getOffset() - id2.getOffset()); - - builder.setProperty(PropertyStates.createProperty( - "test", Collections.nCopies(100, now), Type.DATES)); - RecordId id3 = builder.getNodeState().getRecordId(); - assertEquals(16 + 100 * Segment.RECORD_ID_BYTES, - id2.getOffset() - id3.getOffset()); - } - - @Test - public void testAccessControlNodes() throws IOException { - NodeBuilder builder 
= EMPTY_NODE.builder(); - builder.setProperty("jcr:primaryType", "rep:ACL", Type.NAME); - expectSize(96, builder); - expectAmortizedSize(4, builder); - - NodeBuilder deny = builder.child("deny"); - deny.setProperty("jcr:primaryType", "rep:DenyACE", Type.NAME); - deny.setProperty("rep:principalName", "everyone"); - deny.setProperty(PropertyStates.createProperty( - "rep:privileges", ImmutableList.of("jcr:read"), Type.NAMES)); - expectSize(240, builder); - expectAmortizedSize(32, builder); - - NodeBuilder allow = builder.child("allow"); - allow.setProperty("jcr:primaryType", "rep:GrantACE"); - allow.setProperty("rep:principalName", "administrators"); - allow.setProperty(PropertyStates.createProperty( - "rep:privileges", ImmutableList.of("jcr:all"), Type.NAMES)); - expectSize(368, builder); - expectAmortizedSize(84, builder); - - NodeBuilder deny0 = builder.child("deny0"); - deny0.setProperty("jcr:primaryType", "rep:DenyACE", Type.NAME); - deny0.setProperty("rep:principalName", "everyone"); - deny0.setProperty("rep:glob", "*/activities/*"); - builder.setProperty(PropertyStates.createProperty( - "rep:privileges", ImmutableList.of("jcr:read"), Type.NAMES)); - expectSize(464, builder); - expectAmortizedSize(124, builder); - - NodeBuilder allow0 = builder.child("allow0"); - allow0.setProperty("jcr:primaryType", "rep:GrantACE"); - allow0.setProperty("rep:principalName", "user-administrators"); - allow0.setProperty(PropertyStates.createProperty( - "rep:privileges", ImmutableList.of("jcr:all"), Type.NAMES)); - expectSize(528, builder); - expectAmortizedSize(160, builder); - } - - @Test - public void testFlatNodeUpdate() throws IOException { - SegmentStore store = new MemoryStore(); - SegmentWriter writer = store.getTracker().getWriter(); - - NodeBuilder builder = EMPTY_NODE.builder(); - for (int i = 0; i < 1000; i++) { - builder.child("child" + i); - } - - SegmentNodeState state = writer.writeNode(builder.getNodeState()); - writer.flush(); - Segment segment = store.readSegment(state.getRecordId().getSegmentId()); - assertEquals(27584, segment.size()); - - writer.flush(); // force flushing of the previous segment - - builder = state.builder(); - builder.child("child1000"); - state = writer.writeNode(builder.getNodeState()); - writer.flush(); - segment = store.readSegment(state.getRecordId().getSegmentId()); - assertEquals(560, segment.size()); - } - - private static void expectSize(int expectedSize, NodeBuilder builder) throws IOException { - SegmentWriter writer = new MemoryStore().getTracker().getWriter(); - RecordId id = writer.writeNode(builder.getNodeState()).getRecordId(); - writer.flush(); - Segment segment = id.getSegment(); - assertEquals("Unexpected size of segment " + id + " info=" + segment.getSegmentInfo(), - expectedSize, segment.size()); - } - - private static void expectAmortizedSize(int expectedSize, NodeBuilder builder) throws IOException { - SegmentWriter writer = new MemoryStore().getTracker().getWriter(); - NodeState state = builder.getNodeState(); - RecordId id1 = writer.writeNode(state).getRecordId(); - RecordId id2 = writer.writeNode(state).getRecordId(); - assertEquals(expectedSize, id1.getOffset() - id2.getOffset()); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersionTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersionTest.java deleted file mode 100644 index 5e0bc06..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentVersionTest.java +++ /dev/null @@ 
-1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.jackrabbit.oak.api.Type.LONG; -import static org.apache.jackrabbit.oak.api.Type.LONGS; -import static org.apache.jackrabbit.oak.api.Type.STRING; -import static org.apache.jackrabbit.oak.api.Type.STRINGS; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.LATEST_VERSION; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_10; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy.CleanupType.CLEAN_NONE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.IOException; -import java.util.concurrent.Callable; - -import javax.annotation.Nonnull; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.plugins.segment.compaction.CompactionStrategy; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class SegmentVersionTest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - private File directory; - - @Test - public void latestVersion() { - assertEquals(V_11, LATEST_VERSION); - } - - @Test - public void compareOldRevision() throws Exception { - FileStore fileStoreV10 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withSegmentVersion(V_10).build(); - try { - NodeState content = addTestContent(fileStoreV10, "content").getChildNode("content"); - assertVersion(content, SegmentVersion.V_10); - NodeBuilder builder = content.builder(); - builder.setChildNode("foo"); - content.compareAgainstBaseState(builder.getNodeState(), new NodeStateDiff() { - @Override - public boolean propertyAdded(PropertyState after) { - fail(); - return false; - } - - @Override - public boolean propertyChanged(PropertyState before, PropertyState after) { - fail(); 
- return false; - } - - @Override - public boolean propertyDeleted(PropertyState before) { - fail(); - return false; - } - - @Override - public boolean childNodeAdded(String name, NodeState after) { - fail(); - return false; - } - - @Override - public boolean childNodeChanged(String name, NodeState before, NodeState after) { - fail(); - return false; - } - - @Override - public boolean childNodeDeleted(String name, NodeState before) { - assertEquals("foo", name); - return false; - } - }); - } finally { - fileStoreV10.close(); - } - } - - @Test - public void readOldVersions() throws Exception { - FileStore fileStoreV10 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withSegmentVersion(V_10).build(); - try { - NodeState content = addTestContent(fileStoreV10, "content"); - assertVersion(content, SegmentVersion.V_10); - } finally { - fileStoreV10.close(); - } - - FileStore fileStoreV11 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).build(); - try { - verifyContent(fileStoreV11, "content"); - } finally { - fileStoreV11.close(); - } - } - - @Test - public void mixedVersions() throws Exception { - FileStore fileStoreV10 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withSegmentVersion(V_10).build(); - try { - NodeState content10 = addTestContent(fileStoreV10, "content10"); - assertVersion(content10, SegmentVersion.V_10); - } finally { - fileStoreV10.close(); - } - - FileStore fileStoreV11 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).build(); - try { - NodeState content11 = addTestContent(fileStoreV11, "content11"); - assertVersion(content11, V_11); - verifyContent(fileStoreV11, "content10"); - verifyContent(fileStoreV11, "content11"); - } finally { - fileStoreV11.close(); - } - } - - @Test - public void migrate() throws Exception { - FileStore fileStoreV10 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withSegmentVersion(V_10).build(); - try { - addTestContent(fileStoreV10, "content10"); - } finally { - fileStoreV10.close(); - } - - FileStore fileStoreV11 = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).build(); - try { - fileStoreV11.setCompactionStrategy(new CompactionStrategy(false, false, - CLEAN_NONE, 0, (byte) 0) { - @Override - public boolean compacted(@Nonnull Callable setHead) throws Exception { - return setHead.call(); - } - }); - checkAllVersions(fileStoreV11.getHead(), SegmentVersion.V_10); - fileStoreV11.compact(); - checkAllVersions(fileStoreV11.getHead(), V_11); - } finally { - fileStoreV11.close(); - } - } - - private static void checkAllVersions(SegmentNodeState head, SegmentVersion version) { - assertVersion(head, version); - for (ChildNodeEntry childNodeEntry : head.getChildNodeEntries()) { - checkAllVersions((SegmentNodeState) childNodeEntry.getNodeState(), version); - } - } - - private static void assertVersion(NodeState node, SegmentVersion version) { - assertTrue(node instanceof SegmentNodeState); - assertEquals(version, ((SegmentNodeState) node).getSegment().getSegmentVersion()); - } - - @SuppressWarnings("deprecation") - private static NodeState addTestContent(FileStore fs, String nodeName) - throws CommitFailedException { - NodeStore store = SegmentNodeStore.builder(fs).build(); - NodeBuilder builder = store.getRoot().builder(); - - NodeBuilder content = builder.child(nodeName); - content.setProperty("a", 1); - content.setProperty("aM", ImmutableList.of(1L, 2L, 3L, 4L), LONGS); - - content.setProperty("b", "azerty"); - content.setProperty("bM", - ImmutableList.of("a", "z", "e", "r", "t", 
"y"), STRINGS); - - // add blobs? - - return store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - private static void verifyContent(FileStore fs, String nodeName) { - NodeStore store = SegmentNodeStore.builder(fs).build(); - SegmentNodeState content = (SegmentNodeState) store.getRoot() - .getChildNode(nodeName); - - assertEquals(new Long(1), content.getProperty("a").getValue(LONG)); - assertEquals(ImmutableList.of(1L, 2L, 3L, 4L), - Lists.newArrayList(content.getProperty("aM").getValue(LONGS))); - - assertEquals("azerty", content.getProperty("b").getValue(STRING)); - assertEquals("azerty", content.getString("b")); - - assertEquals(ImmutableList.of("a", "z", "e", "r", "t", "y"), - Lists.newArrayList(content.getProperty("bM").getValue(STRINGS))); - assertEquals(ImmutableList.of("a", "z", "e", "r", "t", "y"), - Lists.newArrayList(content.getStrings("bM"))); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ShortSetTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ShortSetTest.java deleted file mode 100644 index 72383ea..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/ShortSetTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.util.Random; - -import org.apache.jackrabbit.oak.plugins.segment.RecordIdSet.ShortSet; -import org.junit.Test; - -public class ShortSetTest { - private final ShortSet set = new ShortSet(); - - @Test - public void empty() { - for (short k = Short.MIN_VALUE; k < Short.MAX_VALUE; k++) { - assertFalse(set.contains(k)); - } - } - - @Test - public void addOne() { - set.add(s(42)); - assertTrue(set.contains(s(42))); - } - - @Test - public void addTwo() { - set.add(s(21)); - set.add(s(42)); - assertTrue(set.contains(s(21))); - assertTrue(set.contains(s(42))); - } - - @Test - public void addTwoReverse() { - set.add(s(42)); - set.add(s(21)); - assertTrue(set.contains(s(21))); - assertTrue(set.contains(s(42))); - } - - @Test - public void addFirst() { - short[] elements = new short[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}; - addAndCheck(elements); - } - - @Test - public void addLast() { - short[] elements = new short[]{8, 7, 6, 5, 4, 3, 2, 1, 0, 9}; - addAndCheck(elements); - } - - @Test - public void addMedian() { - short[] elements = new short[]{0, 1, 2, 3, 4, 6, 7, 8, 9, 5}; - addAndCheck(elements); - } - - @Test - public void addRandom() { - short[] elements = new short[8192]; - Random rnd = new Random(); - for (int k = 0; k < elements.length; k++) { - elements[k] = s(rnd.nextInt(1 + Short.MAX_VALUE - Short.MIN_VALUE) + Short.MIN_VALUE); - } - - addAndCheck(elements); - } - - private void addAndCheck(short[] elements) { - for (short k : elements) { - set.add(k); - } - for (short k : elements) { - assertTrue(set.contains(k)); - } - } - - private static short s(int n) { - return (short) n; - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/StringCacheTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/StringCacheTest.java deleted file mode 100644 index 0062bd4..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/StringCacheTest.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
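ShortSetTest, removed above, covered RecordIdSet.ShortSet, a sorted-array set of shorts used to track record offsets compactly. The structure it verified is plain binary-search insertion, roughly (an illustrative stand-alone sketch, not the removed implementation):

    import java.util.Arrays;

    // Minimal sorted-array short set, mirroring what the removed ShortSet provided.
    public class SortedShortSet {

        private short[] elements = new short[0];

        public boolean contains(short value) {
            return Arrays.binarySearch(elements, value) >= 0;
        }

        public void add(short value) {
            int idx = Arrays.binarySearch(elements, value);
            if (idx < 0) {
                int insert = -idx - 1; // insertion point returned by binarySearch
                short[] grown = new short[elements.length + 1];
                System.arraycopy(elements, 0, grown, 0, insert);
                grown[insert] = value;
                System.arraycopy(elements, insert, grown, insert + 1, elements.length - insert);
                elements = grown;
            }
        }
    }

StringCacheTest below goes the same way: the two-level string cache it exercises is part of the removed module.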
- */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.util.ArrayList; -import java.util.Random; -import java.util.concurrent.atomic.AtomicInteger; - -import javax.annotation.Nullable; - -import org.junit.Test; - -import com.google.common.base.Function; - -public class StringCacheTest { - - @Test - public void empty() { - final AtomicInteger counter = new AtomicInteger(); - Function<Integer, String> loader = new Function<Integer, String>() { - @Override @Nullable - public String apply(@Nullable Integer input) { - counter.incrementAndGet(); - return "" + input; - } - }; - StringCache c = new StringCache(0); - for (int repeat = 0; repeat < 10; repeat++) { - for (int i = 0; i < 1000; i++) { - assertEquals("" + i, c.getString(i, i, i, loader)); - } - } - // the LIRS cache should be almost empty (low hit rate there) - assertTrue("" + counter, counter.get() > 1000); - // but the fast cache should improve the total hit rate - assertTrue("" + counter, counter.get() < 5000); - } - - @Test - public void largeEntries() { - final AtomicInteger counter = new AtomicInteger(); - final String large = new String(new char[1024]); - Function<Integer, String> loader = new Function<Integer, String>() { - @Override @Nullable - public String apply(@Nullable Integer input) { - counter.incrementAndGet(); - return large + input; - } - }; - StringCache c = new StringCache(1024); - for (int repeat = 0; repeat < 10; repeat++) { - for (int i = 0; i < 1000; i++) { - assertEquals(large + i, c.getString(i, i, i, loader)); - assertEquals(large + 0, c.getString(0, 0, 0, loader)); - } - } - // the LIRS cache should be almost empty (low hit rate there) - // and large strings are not kept in the fast cache, so hit rate should be bad - assertTrue("" + counter, counter.get() > 9000); - assertTrue("" + counter, counter.get() < 10000); - } - - @Test - public void clear() { - final AtomicInteger counter = new AtomicInteger(); - Function<Integer, String> uniqueLoader = new Function<Integer, String>() { - @Override @Nullable - public String apply(@Nullable Integer input) { - return "" + counter.incrementAndGet(); - } - }; - StringCache c = new StringCache(0); - // load a new entry - assertEquals("1", c.getString(0, 0, 0, uniqueLoader)); - // but only once - assertEquals("1", c.getString(0, 0, 0, uniqueLoader)); - c.clear(); - // after clearing the cache, load a new entry - assertEquals("2", c.getString(0, 0, 0, uniqueLoader)); - assertEquals("2", c.getString(0, 0, 0, uniqueLoader)); - } - - @Test - public void randomized() { - ArrayList<Function<Integer, String>> loaderList = new ArrayList<Function<Integer, String>>(); - int segmentCount = 10; - for (int i = 0; i < segmentCount; i++) { - final int x = i; - Function<Integer, String> loader = new Function<Integer, String>() { - @Override @Nullable - public String apply(@Nullable Integer input) { - return "loader #" + x + " offset " + input; - } - }; - loaderList.add(loader); - } - StringCache c = new StringCache(10); - Random r = new Random(1); - for (int i = 0; i < 1000; i++) { - int segment = r.nextInt(segmentCount); - int offset = r.nextInt(10); - Function<Integer, String> loader = loaderList.get(segment); - String x = c.getString(segment, segment, offset, loader); - assertEquals(loader.apply(offset), x); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TemplateTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TemplateTest.java deleted file mode 100644 index 057ad31..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TemplateTest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProperty; - -import java.io.IOException; -import java.util.ArrayList; - -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; - -import static org.junit.Assert.*; - -import org.junit.Assert; -import org.junit.Test; - -public class TemplateTest { - - @Test - public void testHashCode() throws IOException { - // child node null vs "" - PropertyState primary = createProperty("primary", "primary"); - PropertyState mixin = createProperty("mixin", "mixin"); - PropertyTemplate[] properties = new PropertyTemplate[0]; - - Template t0 = new Template(primary, mixin, properties, ""); - Template t1 = new Template(primary, mixin, properties, null); - - assertNotEquals(t0.hashCode(), t1.hashCode()); - } - - @Test - public void testHashCode2() throws IOException { - // mixins null vs [] - PropertyState primary = createProperty("primary", "primary"); - PropertyState mixin = createProperty("mixin", new ArrayList(), - Type.STRINGS); - PropertyTemplate[] properties = new PropertyTemplate[0]; - String childNode = "c"; - - Template t0 = new Template(primary, null, properties, childNode); - Template t1 = new Template(primary, mixin, properties, childNode); - - assertNotEquals(t0.hashCode(), t1.hashCode()); - } - - @Test - public void testEquals() throws IOException { - // same properties, different order - PropertyState primary = createProperty("primary", "primary"); - PropertyState mixin = createProperty("mixin", "mixin"); - - PropertyTemplate p0 = new PropertyTemplate(createProperty("p0", "v0")); - PropertyTemplate p1 = new PropertyTemplate(createProperty("p1", "v1")); - PropertyTemplate[] pt0 = new PropertyTemplate[] { p0, p1 }; - PropertyTemplate[] pt1 = new PropertyTemplate[] { p1, p0 }; - - String childNode = "c"; - - Template t0 = new Template(primary, mixin, pt0, childNode); - Template t1 = new Template(primary, mixin, pt1, childNode); - - assertEquals(t0, t1); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TestUtils.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TestUtils.java deleted file mode 100644 index 650c446..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/TestUtils.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment; - -import static com.google.common.collect.Maps.newHashMap; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ALIGN_BITS; - -import java.util.Map; -import java.util.Random; - -import javax.annotation.Nonnull; - -// FIXME SegmentTestUtils duplicates this -public final class TestUtils { - private TestUtils() {} - - public static RecordId newRecordId(SegmentTracker factory, Random random) { - SegmentId id = factory.newDataSegmentId(); - RecordId r = new RecordId(id, newValidOffset(random)); - return r; - } - - public static int newValidOffset(Random random) { - return random.nextInt(MAX_SEGMENT_SIZE >> RECORD_ALIGN_BITS) << RECORD_ALIGN_BITS; - } - - /** - * Returns a new valid record offset, between {@code a} and {@code b}, - * exclusive. - */ - public static int newValidOffset(@Nonnull Random random, int a, int b) { - int p = (a >> RECORD_ALIGN_BITS) + 1; - int q = (b >> RECORD_ALIGN_BITS); - return (p + random.nextInt(q - p)) << RECORD_ALIGN_BITS; - } - - /** - * Create a random map of record ids. - * - * @param rnd - * @param tracker - * @param segmentCount number of segments - * @param entriesPerSegment number of records per segment - * @return map of record ids - */ - public static Map randomRecordIdMap(Random rnd, SegmentTracker tracker, - int segmentCount, int entriesPerSegment) { - Map map = newHashMap(); - for (int i = 0; i < segmentCount; i++) { - SegmentId id = tracker.newDataSegmentId(); - int offset = MAX_SEGMENT_SIZE; - for (int j = 0; j < entriesPerSegment; j++) { - offset = newValidOffset(rnd, (entriesPerSegment - j) << RECORD_ALIGN_BITS, offset); - RecordId before = new RecordId(id, offset); - RecordId after = new RecordId( - tracker.newDataSegmentId(), - newValidOffset(rnd, 0, MAX_SEGMENT_SIZE)); - map.put(before, after); - } - } - return map; - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionEstimatorTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionEstimatorTest.java deleted file mode 100644 index 7c3fbbf..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/CompactionEstimatorTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.Random; - -import com.google.common.base.Suppliers; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class CompactionEstimatorTest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Test - public void testGainEstimator() throws Exception { - final int MB = 1024 * 1024; - final int blobSize = 2 * MB; - - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withMaxFileSize(2).withMemoryMapping(false).build(); - SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - - // 1. Create some blob properties - NodeBuilder builder = nodeStore.getRoot().builder(); - - NodeBuilder c1 = builder.child("c1"); - c1.setProperty("a", createBlob(nodeStore, blobSize)); - c1.setProperty("b", "foo"); - - NodeBuilder c2 = builder.child("c2"); - c2.setProperty("a", createBlob(nodeStore, blobSize)); - c2.setProperty("b", "foo"); - - NodeBuilder c3 = builder.child("c3"); - c3.setProperty("a", createBlob(nodeStore, blobSize)); - c3.setProperty("b", "foo"); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - // 2. Now remove the property - builder = nodeStore.getRoot().builder(); - builder.child("c1").remove(); - builder.child("c2").remove(); - nodeStore.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - fileStore.flush(); - try { - // should be at 66% - assertTrue(fileStore.estimateCompactionGain(Suppliers.ofInstance(false)) - .estimateCompactionGain(0) > 60); - } finally { - fileStore.close(); - } - } - - private static Blob createBlob(NodeStore nodeStore, int size) throws IOException { - byte[] data = new byte[size]; - new Random().nextBytes(data); - return nodeStore.createBlob(new ByteArrayInputStream(data)); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ExternalBlobReferenceTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ExternalBlobReferenceTest.java deleted file mode 100644 index e213379..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ExternalBlobReferenceTest.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; - -import com.google.common.base.Strings; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class ExternalBlobReferenceTest { - - @Rule - public final TemporaryFolder segmentFolder = new TemporaryFolder(new File("target")); - - private FileStore fileStore; - - private BlobStore blobStore; - - @Before - public void createFileStore() throws Exception { - blobStore = mock(BlobStore.class); - fileStore = FileStore.builder(segmentFolder.getRoot()).withBlobStore(blobStore).build(); - } - - @After - public void destroyFileStore() { - fileStore.close(); - } - - /** - * The {@code SegmentWriter} should be able to write blob IDs whose length - * is between 0 and {@code Segment.BLOB_ID_SMALL_LIMIT - 1} bytes. It should - * be possible to correctly read the blob ID back and pass it to the {@code - * BlobStore} to obtain information about the blob. - *
<p>
- * This code path executes only if the written stream is {@code - * Segment.MEDIUM_LIMIT} bytes long (or more). If the length of the stream - * is smaller, the binary value is inlined in the segment and the {@code - * BlobStore} is never called. - *
<p>
- * See OAK-3105. - */ - @Test - public void testShortBlobId() throws Exception { - testBlobIdWithLength(Segment.BLOB_ID_SMALL_LIMIT - 1); - } - - /** - * If the {@code BlobStore} returns a blob ID whose length is {@code - * Segment.BLOB_ID_SMALL_LIMIT} bytes long (or more), writing the stream - * should succeed. In this case, the blob ID is considered a long blob ID - * and an alternate encoding is used. It should be possible to correctly - * read the blob ID back and pass it to the {@code BlobStore} to obtain - * information about the blob. - *
<p>
- * This code path executes only if the written stream is {@code - * Segment.MEDIUM_LIMIT} bytes long (or more). If the length of the stream - * is smaller, the binary value is inlined in the segment and the {@code - * BlobStore} is never called. - *
<p>
- * See OAK-3105 and OAK-3107. - */ - @Test - public void testLongBlobId() throws Exception { - testBlobIdWithLength(Segment.BLOB_ID_SMALL_LIMIT); - } - - private void testBlobIdWithLength(int blobIdLength) throws Exception { - String blobId = Strings.repeat("x", blobIdLength); - long blobLength = Segment.MEDIUM_LIMIT; - - doReturn(blobId).when(blobStore).writeBlob(any(InputStream.class)); - doReturn(blobLength).when(blobStore).getBlobLength(blobId); - - SegmentBlob blob = fileStore.getTracker().getWriter().writeStream(newRandomInputStream(blobLength)); - - assertEquals(blobLength, blob.length()); - } - - private static InputStream newRandomInputStream(long size) { - return new LimitInputStream(new ConstantInputStream(0), size); - } - - private static class ConstantInputStream extends InputStream { - - private final int value; - - public ConstantInputStream(int value) { - this.value = value; - } - - @Override - public int read() { - return value; - } - - } - - private static class LimitInputStream extends InputStream { - - private final InputStream stream; - - private final long limit; - - private long read = 0; - - public LimitInputStream(InputStream stream, long limit) { - this.stream = stream; - this.limit = limit; - } - - @Override - public int read() throws IOException { - if (read >= limit) { - return -1; - } - - read = read + 1; - - return stream.read(); - } - - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileBlob.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileBlob.java deleted file mode 100644 index a389719..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileBlob.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; - -/** - * A blob as a file in the file system. - * Used for testing. 
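ExternalBlobReferenceTest above verified both blob-ID encodings (below and above Segment.BLOB_ID_SMALL_LIMIT) against a mocked BlobStore. External blob wiring itself survives the migration; on the new builder it looks roughly like this (fileStoreBuilder and withBlobStore appear elsewhere in this change; the mock setup is a sketch):

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
    import org.apache.jackrabbit.oak.spi.blob.BlobStore;

    import static org.mockito.Mockito.mock;

    public class ExternalBlobStoreExample {

        public static void main(String[] args) throws Exception {
            // Any BlobStore implementation will do; a mock stands in here,
            // as it did in the removed test.
            BlobStore blobStore = mock(BlobStore.class);

            FileStore store = FileStoreBuilder
                    .fileStoreBuilder(new File("target", "segmentstore"))
                    .withBlobStore(blobStore)
                    .build();
            try {
                // Streams above the inline limits go to the BlobStore; only
                // their IDs are written into the segments.
            } finally {
                store.close();
            }
        }
    }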
- */ -public class FileBlob implements Blob { - - private final String path; - - public FileBlob(String path) { - this.path = path; - } - - @Override - public String getReference() { - return path; // FIXME: should be a secure reference - } - - @Override - public String getContentIdentity() { - return null; - } - - @Nonnull - @Override - public InputStream getNewStream() { - try { - return new FileInputStream(getFile()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public long length() { - return getFile().length(); - } - - private File getFile() { - return new File(path); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof FileBlob) { - FileBlob other = (FileBlob) obj; - return this.path.equals(other.path); - } - return super.equals(obj); - } - - @Override - public int hashCode() { - return path.hashCode(); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreIT.java deleted file mode 100644 index 2201ba5..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreIT.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
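FileStoreIT below is the largest removal: restart, garbage collection, compaction, recovery and read-only access against the old FileStore, all of which have counterparts in the oak-segment-tar test suite. The basic write-flush-reopen cycle it exercised translates roughly as follows (a sketch on the new builders, assuming flush/close semantics are unchanged):

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;

    public class ReopenExample {

        public static void main(String[] args) throws Exception {
            File folder = new File("target", "segmentstore");

            // First incarnation: write some content and flush it to disk.
            FileStore store = FileStoreBuilder.fileStoreBuilder(folder).build();
            SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
            NodeBuilder root = nodeStore.getRoot().builder();
            root.setProperty("step", "a");
            nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY);
            store.flush();
            store.close();

            // Second incarnation: the flushed head must be visible again.
            store = FileStoreBuilder.fileStoreBuilder(folder).build();
            nodeStore = SegmentNodeStoreBuilders.builder(store).build();
            assert "a".equals(nodeStore.getRoot().getString("step"));
            store.close();
        }
    }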
- */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.collect.Lists.newArrayList; -import static com.google.common.collect.Sets.newTreeSet; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_MK; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures; -import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import com.google.common.base.Strings; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.Compactor; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeBuilder; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentWriter; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.ReadOnlyStore; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class FileStoreIT { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @BeforeClass - public static void assumptions() { - assumeTrue(getFixtures().contains(SEGMENT_MK)); - } - - @Test - public void testRestartAndGCWithoutMM() throws Exception { - testRestartAndGC(false); - } - - @Test - public void testRestartAndGCWithMM() throws Exception { - testRestartAndGC(true); - } - - public void testRestartAndGC(boolean memoryMapping) throws Exception { - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(memoryMapping).build(); - store.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(memoryMapping).build(); - SegmentNodeState base = store.getHead(); - SegmentNodeBuilder builder = base.builder(); - byte[] data = new byte[10 * 1024 * 1024]; - new Random().nextBytes(data); - Blob blob = builder.createBlob(new ByteArrayInputStream(data)); - builder.setProperty("foo", blob); - store.setHead(base, builder.getNodeState()); - store.flush(); - store.setHead(store.getHead(), base); - store.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(memoryMapping).build(); - store.gc(); - store.flush(); - store.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(memoryMapping).build(); - store.close(); - } - - @Test - public void testCompaction() throws Exception { - int largeBinarySize = 10 * 1024 * 1024; - - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - SegmentWriter writer = store.getTracker().getWriter(); - - SegmentNodeState base = store.getHead(); - SegmentNodeBuilder builder = base.builder(); - 
byte[] data = new byte[largeBinarySize]; - new Random().nextBytes(data); - SegmentBlob blob = writer.writeStream(new ByteArrayInputStream(data)); - builder.setProperty("foo", blob); - builder.getNodeState(); // write the blob reference to the segment - builder.setProperty("foo", "bar"); - SegmentNodeState head = builder.getNodeState(); - assertTrue(store.setHead(base, head)); - assertEquals("bar", store.getHead().getString("foo")); - - Compactor compactor = new Compactor(store.getTracker()); - SegmentNodeState compacted = - compactor.compact(EMPTY_NODE, head, EMPTY_NODE); - store.close(); - - // First simulate the case where during compaction a reference to the - // older segments is added to a segment that the compactor is writing - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - head = store.getHead(); - assertTrue(store.size() > largeBinarySize); - builder = head.builder(); - builder.setChildNode("old", head); // reference to pre-compacted state - builder.getNodeState(); - assertTrue(store.setHead(head, compacted)); - store.close(); - - // In this case the revision cleanup is unable to reclaim the old data - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - assertTrue(store.size() > largeBinarySize); - store.cleanup(); - assertTrue(store.size() > largeBinarySize); - store.close(); - - // Now we do the same thing, but let the compactor use a different - // SegmentWriter - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - head = store.getHead(); - assertTrue(store.size() > largeBinarySize); - writer = new SegmentWriter(store, V_11, ""); - compactor = new Compactor(store.getTracker()); - compacted = compactor.compact(EMPTY_NODE, head, EMPTY_NODE); - builder = head.builder(); - builder.setChildNode("old", head); // reference to pre-compacted state - builder.getNodeState(); - writer.flush(); - assertTrue(store.setHead(head, compacted)); - store.close(); - - // Revision cleanup is now able to reclaim the extra space (OAK-1932) - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - assertTrue(store.size() > largeBinarySize); - store.cleanup(); - assertTrue(store.size() < largeBinarySize); - store.close(); - } - - @Test - public void testRecovery() throws Exception { - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - store.flush(); - - RandomAccessFile data0 = new RandomAccessFile(new File(getFileStoreFolder(), "data00000a.tar"), "r"); - long pos0 = data0.length(); - - SegmentNodeState base = store.getHead(); - SegmentNodeBuilder builder = base.builder(); - builder.setProperty("step", "a"); - store.setHead(base, builder.getNodeState()); - store.flush(); - long pos1 = data0.length(); - data0.close(); - - base = store.getHead(); - builder = base.builder(); - builder.setProperty("step", "b"); - store.setHead(base, builder.getNodeState()); - store.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - assertEquals("b", store.getHead().getString("step")); - store.close(); - - RandomAccessFile file = new RandomAccessFile( - new File(getFileStoreFolder(), "data00000a.tar"), "rw"); - file.setLength(pos1); - file.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - assertEquals("a", 
store.getHead().getString("step")); - store.close(); - - file = new RandomAccessFile( - new File(getFileStoreFolder(), "data00000a.tar"), "rw"); - file.setLength(pos0); - file.close(); - - store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - assertFalse(store.getHead().hasProperty("step")); - store.close(); - } - - @Test - public void testRearrangeOldData() throws IOException { - new FileOutputStream(new File(getFileStoreFolder(), "data00000.tar")).close(); - new FileOutputStream(new File(getFileStoreFolder(), "data00010a.tar")).close(); - new FileOutputStream(new File(getFileStoreFolder(), "data00030.tar")).close(); - new FileOutputStream(new File(getFileStoreFolder(), "bulk00002.tar")).close(); - new FileOutputStream(new File(getFileStoreFolder(), "bulk00005a.tar")).close(); - - Map files = FileStore.collectFiles(getFileStoreFolder()); - assertEquals( - newArrayList(0, 1, 31, 32, 33), - newArrayList(newTreeSet(files.keySet()))); - - assertTrue(new File(getFileStoreFolder(), "data00000a.tar").isFile()); - assertTrue(new File(getFileStoreFolder(), "data00001a.tar").isFile()); - assertTrue(new File(getFileStoreFolder(), "data00031a.tar").isFile()); - assertTrue(new File(getFileStoreFolder(), "data00032a.tar").isFile()); - assertTrue(new File(getFileStoreFolder(), "data00033a.tar").isFile()); - - files = FileStore.collectFiles(getFileStoreFolder()); - assertEquals( - newArrayList(0, 1, 31, 32, 33), - newArrayList(newTreeSet(files.keySet()))); - } - - @Test // See OAK-2049 - public void segmentOverflow() throws Exception { - for (int n = 1; n < 255; n++) { // 255 = ListRecord.LEVEL_SIZE - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - SegmentWriter writer = store.getTracker().getWriter(); - // writer.length == 32 (from the root node) - - // adding 15 strings with 16516 bytes each - for (int k = 0; k < 15; k++) { - // 16516 = (Segment.MEDIUM_LIMIT - 1 + 2 + 3) - // 1 byte per char, 2 byte to store the length and 3 bytes for the - // alignment to the integer boundary - writer.writeString(Strings.repeat("abcdefghijklmno".substring(k, k + 1), - Segment.MEDIUM_LIMIT - 1)); - } - - // adding 14280 bytes. 
1 byte per char, and 2 bytes to store the length - RecordId x = writer.writeString(Strings.repeat("x", 14278)); - // writer.length == 262052 - - // Adding 765 bytes (255 recordIds) - // This should cause the current segment to flush - List<RecordId> list = Collections.nCopies(n, x); - writer.writeList(list); - - writer.flush(); - - // Don't close the store in a finally clause as, if a failure happens, - // this will also fail and cover up the earlier exception - store.close(); - } - } - - @Test - public void nonBlockingROStore() throws Exception { - FileStore store = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1).withMemoryMapping(false).build(); - store.flush(); // first 1kB - SegmentNodeState base = store.getHead(); - SegmentNodeBuilder builder = base.builder(); - builder.setProperty("step", "a"); - store.setHead(base, builder.getNodeState()); - store.flush(); // second 1kB - - ReadOnlyStore ro = null; - try { - ro = FileStore.builder(getFileStoreFolder()).buildReadOnly(); - assertEquals(store.getHead(), ro.getHead()); - } finally { - if (ro != null) { - ro.close(); - } - store.close(); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsTest.java deleted file mode 100644 index 77db0fb..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreStatsTest.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
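The nonBlockingROStore case above relied on FileStore.ReadOnlyStore; the new module exposes the same capability through the builder. Assuming the buildReadOnly call carries over under that name (to be verified against oak-segment-tar), the pattern becomes:

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
    import org.apache.jackrabbit.oak.segment.file.ReadOnlyFileStore;

    public class ReadOnlyExample {

        public static void main(String[] args) throws Exception {
            File folder = new File("target", "segmentstore");

            FileStore rw = FileStoreBuilder.fileStoreBuilder(folder).build();
            rw.flush();

            // A concurrent read-only view of the same folder; opening it must
            // neither block nor modify the read-write store.
            ReadOnlyFileStore ro = FileStoreBuilder.fileStoreBuilder(folder).buildReadOnly();
            try {
                // ro.getHead() mirrors the last flushed head of rw.
            } finally {
                ro.close();
                rw.close();
            }
        }
    }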
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.File; -import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; - -import org.apache.jackrabbit.oak.commons.concurrent.ExecutorCloser; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.stats.DefaultStatisticsProvider; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import static com.google.common.base.Charsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; - -public class FileStoreStatsTest { - @Rule - public final TemporaryFolder segmentFolder = new TemporaryFolder(new File("target")); - - private FileStore fileStore; - private ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); - private StatisticsProvider statsProvider = new DefaultStatisticsProvider(executor); - - @Before - public void createFileStore() throws Exception { - BlobStore blobStore = mock(BlobStore.class); - fileStore = FileStore.builder(segmentFolder.newFolder()) - .withBlobStore(blobStore) - .withStatisticsProvider(statsProvider).build(); - } - - @After - public void shutDown(){ - fileStore.close(); - new ExecutorCloser(executor).close(); - } - - @Test - public void initCall() throws Exception{ - FileStoreStats stats = new FileStoreStats(statsProvider, fileStore, 1000); - assertEquals(1000, stats.getApproximateSize()); - - stats.written(500); - assertEquals(1500, stats.getApproximateSize()); - - stats.reclaimed(250); - assertEquals(1250, stats.getApproximateSize()); - - assertEquals(1, stats.getTarFileCount()); - } - - @Test - public void tarWriterIntegration() throws Exception{ - FileStoreStats stats = new FileStoreStats(statsProvider, fileStore, 0); - UUID id = UUID.randomUUID(); - long msb = id.getMostSignificantBits(); - long lsb = id.getLeastSignificantBits() & (-1 >>> 4); // OAK-1672 - byte[] data = "Hello, World!".getBytes(UTF_8); - - File file = segmentFolder.newFile(); - TarWriter writer = new TarWriter(file, stats); - writer.writeEntry(msb, lsb, data, 0, data.length); - writer.close(); - - assertEquals(stats.getApproximateSize(), file.length()); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreTest.java deleted file mode 100644 index a934a64..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/FileStoreTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.File; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class FileStoreTest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Ignore("OAK-4054") // FIXME OAK-4054 - @Test - public void containsSegment() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).build(); - try { - SegmentId id = new SegmentId(fileStore.getTracker(), 0, 0); - if (fileStore.containsSegment(id)) { - fileStore.readSegment(id); - } - } finally { - fileStore.close(); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalEntryTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalEntryTest.java deleted file mode 100644 index 5ff0cc6..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalEntryTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.nio.charset.Charset; -import java.util.List; - -import com.google.common.base.Splitter; -import com.google.common.io.Files; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class JournalEntryTest { - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(new File("target")); - - @Test - public void timestampInJournalEntry() throws Exception{ - FileStore fileStore = FileStore.builder(tempFolder.getRoot()).withMaxFileSize(5) - .withNoCache().withMemoryMapping(true).build(); - - SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - - long startTime = System.currentTimeMillis(); - - for (int i = 0; i < 5; i++) { - NodeBuilder root = nodeStore.getRoot().builder(); - root.child("c"+i); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - fileStore.flush(); - } - - fileStore.close(); - - File journal = new File(tempFolder.getRoot(), "journal.log"); - List<String> lines = Files.readLines(journal, Charset.defaultCharset()); - assertFalse(lines.isEmpty()); - - String line = lines.get(0); - List<String> journalEntry = journalParts(line); - assertEquals(3, journalEntry.size()); - - long entryTime = Long.valueOf(journalEntry.get(2)); - assertTrue(entryTime >= startTime); - - JournalReader jr = new JournalReader(journal); - assertEquals(journalParts(lines.get(lines.size() - 1)).get(0), jr.iterator().next()); - jr.close(); - } - - private List<String> journalParts(String line){ - return Splitter.on(' ').splitToList(line); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReaderTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReaderTest.java deleted file mode 100644 index 9f40631..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/JournalReaderTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
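The deleted JournalEntryTest pins the journal.log line layout to three space-separated fields, with the head record id first and a millisecond timestamp third. A parsing sketch (the sample line is invented for illustration; only the field positions are asserted by the test):

```java
import java.util.Arrays;
import java.util.List;

// Parses one journal.log line of the shape "<recordId> <middle> <timestampMillis>".
// The sample values below are made up; the deleted test only asserts the
// three-field layout, the record id position and the timestamp position.
public class JournalLineSketch {
    public static void main(String[] args) {
        String line = "deadbeef-0000-4000-8000-000000000000:128 root 1456746798884";
        List<String> parts = Arrays.asList(line.split(" "));
        if (parts.size() != 3) {
            throw new IllegalArgumentException("unexpected journal line: " + line);
        }
        String recordId = parts.get(0);
        long timestamp = Long.parseLong(parts.get(2));
        System.out.println(recordId + " flushed at " + timestamp);
    }
}
```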
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.apache.commons.io.FileUtils.write; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.util.Iterator; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class JournalReaderTest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Test - public void testEmpty() throws IOException { - JournalReader journalReader = createJournalReader(""); - try { - assertFalse(journalReader.iterator().hasNext()); - } finally { - journalReader.close(); - } - } - - @Test - public void testSingleton() throws IOException { - JournalReader journalReader = createJournalReader("one 1"); - try { - Iterator<String> journal = journalReader.iterator(); - assertTrue(journal.hasNext()); - assertEquals("one", journal.next()); - assertFalse(journal.hasNext()); - } finally { - journalReader.close(); - } - } - - @Test - public void testMultiple() throws IOException { - JournalReader journalReader = createJournalReader("one 1\ntwo 2\nthree 3 456"); - try { - Iterator<String> journal = journalReader.iterator(); - assertTrue(journal.hasNext()); - assertEquals("three", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("two", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("one", journal.next()); - assertFalse(journal.hasNext()); - } finally { - journalReader.close(); - } - } - - @Test - public void testSpaces() throws IOException { - JournalReader journalReader = createJournalReader("\n \n \n "); - try { - Iterator<String> journal = journalReader.iterator(); - assertTrue(journal.hasNext()); - assertEquals("", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("", journal.next()); - assertFalse(journal.hasNext()); - } finally { - journalReader.close(); - } - } - - @Test - public void testIgnoreInvalid() throws IOException { - JournalReader journalReader = createJournalReader("one 1\ntwo 2\ninvalid\nthree 3"); - try { - Iterator<String> journal = journalReader.iterator(); - assertTrue(journal.hasNext()); - assertEquals("three", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("two", journal.next()); - assertTrue(journal.hasNext()); - assertEquals("one", journal.next()); - assertFalse(journal.hasNext()); - } finally { - journalReader.close(); - } - } - - private JournalReader createJournalReader(String s) throws IOException { - File journalFile = folder.newFile("jrt"); - write(journalFile, s); - return new JournalReader(journalFile); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/LargeNumberOfPropertiesTestIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/LargeNumberOfPropertiesTestIT.java deleted file mode 100644 index 4ecefa4..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/LargeNumberOfPropertiesTestIT.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
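JournalReaderTest above fixes the reader's observable contract: entries come back newest first, only the token before the first space is returned, and lines without a space are skipped. A behavioural sketch in plain Java (the real reader scans the file backwards in blocks rather than materialising all lines):

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Behavioural sketch of JournalReader, matching the deleted tests: newest line
// first, first token only, tokenless lines ignored. Not the real implementation.
public class JournalReaderSketch {
    static Deque<String> headRevisions(String journal) {
        Deque<String> result = new ArrayDeque<String>();
        for (String line : journal.split("\n", -1)) {
            int space = line.indexOf(' ');
            if (space >= 0) {
                result.addFirst(line.substring(0, space)); // later lines end up first
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Mirrors testIgnoreInvalid: "invalid" has no space and is skipped.
        System.out.println(headRevisions("one 1\ntwo 2\ninvalid\nthree 3"));
        // -> [three, two, one]
    }
}
```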
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.junit.Assume.assumeTrue; - -import java.io.File; -import java.io.IOException; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *
<p> - * Tests verifying if the repository gets corrupted or not: {@code OAK-2481 IllegalStateException in TarMk with large number of properties} - * </p> - * - * <p> - * These tests are disabled by default due to their long running time. On the - * command line specify {@code -DLargeNumberOfPropertiesTestIT=true} to enable - * them. - * </p> - * - * <p> - * If you only want to run this test: - * {@code mvn verify -Dsurefire.skip.ut=true -PintegrationTesting -Dit.test=LargeNumberOfPropertiesTestIT -DLargeNumberOfPropertiesTestIT=true} - * </p>
- */ -public class LargeNumberOfPropertiesTestIT { - - private static final Logger LOG = LoggerFactory - .getLogger(LargeNumberOfPropertiesTestIT.class); - private static final boolean ENABLED = Boolean - .getBoolean(LargeNumberOfPropertiesTestIT.class.getSimpleName()); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Before - public void setUp() throws IOException { - assumeTrue(ENABLED); - } - - @Test - public void corruption() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withMaxFileSize(5) - .withNoCache().withMemoryMapping(true).build(); - SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - - NodeBuilder root = nodeStore.getRoot().builder(); - - try { - NodeBuilder c = root.child("c" + System.currentTimeMillis()); - // i=26 hits the hard limit for the number of properties a node can - // have (262144) - for (int i = 0; i < 25; i++) { - LOG.debug("run {}/24", i); - for (int j = 0; j < 10000; j++) { - c.setProperty("int-" + i + "-" + j, i); - } - } - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } finally { - fileStore.close(); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamBlockSize.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamBlockSize.java deleted file mode 100644 index 24c7e48..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamBlockSize.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
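The numbers in the deleted corruption test line up as follows: the loop writes 25 batches of 10,000 properties (i = 0..24), which stays below the quoted per-node hard limit of 262,144 = 2^18, while letting the loop run into i = 26 would cross it, which is what the inline comment alludes to:

```java
// Arithmetic behind the deleted LargeNumberOfPropertiesTestIT loop bounds.
public class PropertyLimitArithmetic {
    public static void main(String[] args) {
        int hardLimit = 262144;                     // 2^18, per the deleted comment
        System.out.println(25 * 10000 < hardLimit); // i = 0..24 -> 250000, true
        System.out.println(27 * 10000 > hardLimit); // through i = 26 -> 270000, true
    }
}
```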
- */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.GBK_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.WINDOWS_31J_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.X_WINDOWS_949_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.X_WINDOWS_950_BIN; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -import java.io.File; -import java.io.IOException; -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.Collection; - -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -/** - * Test checks symmetric behaviour with BufferedReader - * FIXME: this is mostly taken from a copy of org.apache.commons.io.input - * with a fix for IO-471. Replace again once commons-io has released a fixed version. - */ -@RunWith(Parameterized.class) -public class ReversedLinesFileReaderTestParamBlockSize { - - private static final String UTF_8 = "UTF-8"; - private static final String ISO_8859_1 = "ISO-8859-1"; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File createFile(byte[] data) throws IOException { - return ReversedLinesReaderTestData.createFile(folder.newFile(), data); - } - - @SuppressWarnings("boxing") - @Parameters // small and uneven block sizes are not used in reality but are good to show that the algorithm is solid - public static Collection<Integer[]> blockSizes() { - return Arrays.asList(new Integer[][] { {1}, {3}, {8}, {256}, {4096} }); - } - - private ReversedLinesFileReader reversedLinesFileReader; - private final int testParamBlockSize; - - public ReversedLinesFileReaderTestParamBlockSize(Integer testWithBlockSize) { - testParamBlockSize = testWithBlockSize; - } - - // Strings are escaped in constants to avoid java source encoding issues (source file enc is UTF-8): - - // windows-31j characters - private static final String TEST_LINE_WINDOWS_31J_1 = "\u3041\u3042\u3043\u3044\u3045"; - private static final String TEST_LINE_WINDOWS_31J_2 = "\u660E\u8F38\u5B50\u4EAC"; - // gbk characters (Simplified Chinese) - private static final String TEST_LINE_GBK_1 = "\u660E\u8F38\u5B50\u4EAC"; - private static final String TEST_LINE_GBK_2 = "\u7B80\u4F53\u4E2D\u6587"; - // x-windows-949 characters (Korean) - private static final String TEST_LINE_X_WINDOWS_949_1 = "\uD55C\uAD6D\uC5B4"; - private static final String TEST_LINE_X_WINDOWS_949_2 = "\uB300\uD55C\uBBFC\uAD6D"; - // x-windows-950 characters (Traditional Chinese) - private static final String TEST_LINE_X_WINDOWS_950_1 = "\u660E\u8F38\u5B50\u4EAC"; - private static final String TEST_LINE_X_WINDOWS_950_2 = "\u7E41\u9AD4\u4E2D\u6587"; - - @After - public void closeReader() { - try { - reversedLinesFileReader.close(); - } catch(Exception e) { - // ignore - } - } - - @Test - public void testWindows31jFile() throws URISyntaxException, IOException { - File testFileWindows31J = createFile(WINDOWS_31J_BIN); - reversedLinesFileReader = new ReversedLinesFileReader(testFileWindows31J, testParamBlockSize, "windows-31j"); - assertEqualsAndNoLineBreaks(TEST_LINE_WINDOWS_31J_2, reversedLinesFileReader.readLine()); -
assertEqualsAndNoLineBreaks(TEST_LINE_WINDOWS_31J_1, reversedLinesFileReader.readLine()); - } - - @Test - public void testGBK() throws URISyntaxException, IOException { - File testFileGBK = createFile(GBK_BIN); - reversedLinesFileReader = new ReversedLinesFileReader(testFileGBK, testParamBlockSize, "GBK"); - assertEqualsAndNoLineBreaks(TEST_LINE_GBK_2, reversedLinesFileReader.readLine()); - assertEqualsAndNoLineBreaks(TEST_LINE_GBK_1, reversedLinesFileReader.readLine()); - } - - @Test - public void testxWindows949File() throws URISyntaxException, IOException { - File testFilexWindows949 = createFile(X_WINDOWS_949_BIN); - reversedLinesFileReader = new ReversedLinesFileReader(testFilexWindows949, testParamBlockSize, "x-windows-949"); - assertEqualsAndNoLineBreaks(TEST_LINE_X_WINDOWS_949_2, reversedLinesFileReader.readLine()); - assertEqualsAndNoLineBreaks(TEST_LINE_X_WINDOWS_949_1, reversedLinesFileReader.readLine()); - } - - @Test - public void testxWindows950File() throws URISyntaxException, IOException { - File testFilexWindows950 = createFile(X_WINDOWS_950_BIN); - reversedLinesFileReader = new ReversedLinesFileReader(testFilexWindows950, testParamBlockSize, "x-windows-950"); - assertEqualsAndNoLineBreaks(TEST_LINE_X_WINDOWS_950_2, reversedLinesFileReader.readLine()); - assertEqualsAndNoLineBreaks(TEST_LINE_X_WINDOWS_950_1, reversedLinesFileReader.readLine()); - } - - static void assertEqualsAndNoLineBreaks(String msg, String expected, String actual) { - if(actual!=null) { - assertFalse("Line contains \\n: line="+actual, actual.contains("\n")); - assertFalse("Line contains \\r: line="+actual, actual.contains("\r")); - } - assertEquals(msg, expected, actual); - } - static void assertEqualsAndNoLineBreaks(String expected, String actual) { - assertEqualsAndNoLineBreaks(null, expected, actual); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamFile.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamFile.java deleted file mode 100644 index 7b0b377..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesFileReaderTestParamFile.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.file; - - -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.GBK_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.WINDOWS_31J_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.X_WINDOWS_949_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.X_WINDOWS_950_BIN; -import static org.apache.jackrabbit.oak.plugins.segment.file.ReversedLinesReaderTestData.createFile; -import static org.junit.Assert.assertEquals; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Stack; - -import org.junit.After; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -/** - * Test checks symmetric behaviour with BufferedReader - * FIXME: this is mostly taken from a copy of org.apache.commons.io.input - * with a fix for IO-471. Replace again once commons-io has released a fixed version. - */ -@RunWith(Parameterized.class) -public class ReversedLinesFileReaderTestParamFile { - - @Parameters - public static Collection<Object[]> blockSizes() { - return Arrays.asList(new Object[][] { - {WINDOWS_31J_BIN, "windows-31j", null}, - {GBK_BIN, "gbk", null}, - {X_WINDOWS_949_BIN, "x-windows-949", null}, - {X_WINDOWS_950_BIN, "x-windows-950", null}, - }); - } - - private ReversedLinesFileReader reversedLinesFileReader; - private BufferedReader bufferedReader; - - private final byte[] data; - private final String encoding; - private final int buffSize; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - public ReversedLinesFileReaderTestParamFile(byte[] data, String encoding, Integer buffSize) { - this.data = data; - this.encoding = encoding; - this.buffSize = buffSize == null ?
4096 : buffSize; - } - - @Test - public void testDataIntegrityWithBufferedReader() throws URISyntaxException, IOException { - File testFileIso = createFile(folder.newFile(), data); - reversedLinesFileReader = new ReversedLinesFileReader(testFileIso, buffSize, encoding); - - Stack<String> lineStack = new Stack<String>(); - - bufferedReader = new BufferedReader(new InputStreamReader(new FileInputStream(testFileIso), encoding)); - String line; - - // read all lines in normal order - while((line = bufferedReader.readLine())!=null) { - lineStack.push(line); - } - - // read in reverse order and compare with lines from stack - while((line = reversedLinesFileReader.readLine())!=null) { - String lineFromBufferedReader = lineStack.pop(); - assertEquals(lineFromBufferedReader, line); - } - - } - - @After - public void closeReader() { - try { - bufferedReader.close(); - } catch(Exception e) { - // ignore - } - try { - reversedLinesFileReader.close(); - } catch(Exception e) { - // ignore - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesReaderTestData.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesReaderTestData.java deleted file mode 100644 index f3a9665..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/ReversedLinesReaderTestData.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; - -/** - * Test checks symmetric behaviour with BufferedReader - * FIXME: this is mostly taken from a copy of org.apache.commons.io.input - * with a fix for IO-471. Replace again once commons-io has released a fixed version.
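The data-integrity test above reduces to a symmetry argument: lines read forwards are pushed onto a stack, then popped while reading backwards, so the two passes must agree. The same pattern standalone, with a plain reversed list standing in for ReversedLinesFileReader:

```java
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

// The stack symmetry check from testDataIntegrityWithBufferedReader, with a
// reversed list standing in for ReversedLinesFileReader.
public class ReverseSymmetrySketch {
    public static void main(String[] args) {
        List<String> forward = Arrays.asList("alpha", "beta", "gamma");
        Deque<String> stack = new ArrayDeque<String>();
        for (String line : forward) {
            stack.push(line);                    // forward pass fills the stack
        }
        for (String line : Arrays.asList("gamma", "beta", "alpha")) {
            if (!line.equals(stack.pop())) {     // reverse pass must mirror it
                throw new AssertionError("order mismatch at " + line);
            }
        }
        System.out.println("forward and backward reads agree");
    }
}
```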
- */ -public final class ReversedLinesReaderTestData { - private ReversedLinesReaderTestData() {} - - public static final byte[] WINDOWS_31J_BIN = new byte[]{ - -126, -97, -126, -96, -126, -95, -126, -94, -126, -93, 13, 10, -106, -66, -105, 65, -114, - 113, -117, -98, 13, 10, - }; - - public static final byte[] GBK_BIN = new byte[]{ - -61, -9, -35, -108, -41, -45, -66, -87, 13, 10, -68, -14, -52, -27, -42, -48, -50, -60, - 13, 10, - }; - - public static final byte[] X_WINDOWS_949_BIN = new byte[]{ - -57, -47, -79, -71, -66, -18, 13, 10, -76, -21, -57, -47, -71, -50, -79, -71, 13, 10, - }; - - public static final byte[] X_WINDOWS_950_BIN = new byte[]{ - -87, -6, -65, -23, -92, 108, -88, -54, 13, 10, -63, 99, -59, -23, -92, -92, -92, -27, - 13, 10, - }; - - public static File createFile(File file, byte[] data) throws IOException { - FileOutputStream os = new FileOutputStream(file); - try { - os.write(data); - return file; - } finally { - os.close(); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/SegmentReferenceLimitTestIT.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/SegmentReferenceLimitTestIT.java deleted file mode 100644 index 2009ed5..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/SegmentReferenceLimitTestIT.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static org.junit.Assume.assumeTrue; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; - -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *
<p> - * Tests verifying if the repository gets corrupted or not: {@code OAK-2294 Corrupt repository after concurrent version operations} - * </p> - * - * <p> - * These tests are disabled by default due to their long running time. On the - * command line specify {@code -DSegmentReferenceLimitTestIT=true} to enable - * them. - * </p> - * - * <p> - * If you only want to run this test: - * {@code mvn verify -Dsurefire.skip.ut=true -PintegrationTesting -Dit.test=SegmentReferenceLimitTestIT -DSegmentReferenceLimitTestIT=true} - * </p>
- */ -public class SegmentReferenceLimitTestIT { - - private static final Logger LOG = LoggerFactory - .getLogger(SegmentReferenceLimitTestIT.class); - private static final boolean ENABLED = Boolean - .getBoolean(SegmentReferenceLimitTestIT.class.getSimpleName()); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - private File getFileStoreFolder() { - return folder.getRoot(); - } - - @Before - public void setUp() throws IOException { - assumeTrue(ENABLED); - } - - @Test - public void corruption() throws Exception { - FileStore fileStore = FileStore.builder(getFileStoreFolder()).withMaxFileSize(1) - .withNoCache().withMemoryMapping(true).build(); - SegmentNodeStore nodeStore = SegmentNodeStore.builder(fileStore).build(); - - NodeBuilder root = nodeStore.getRoot().builder(); - root.setChildNode("test"); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - - List<FutureTask<Void>> l = new ArrayList<FutureTask<Void>>(); - for (int i = 0; i < 10; i++) { - l.add(run(new Worker(nodeStore, "w" + i))); - } - - try { - for (FutureTask<Void> w : l) { - w.get(); - } - } finally { - fileStore.close(); - } - } - - private static <T> FutureTask<T> run(Callable<T> callable) { - FutureTask<T> task = new FutureTask<T>(callable); - new Thread(task).start(); - return task; - } - - private static class Worker implements Callable<Void> { - private final NodeStore nodeStore; - private final String name; - - private Worker(NodeStore nodeStore, String name) { - this.nodeStore = nodeStore; - this.name = name; - } - - @Override - public Void call() throws Exception { - for (int k = 0; k < 400; k++) { - NodeBuilder root = nodeStore.getRoot().builder(); - root.getChildNode("test").setProperty(name + ' ' + k, name + " value " + k); - nodeStore.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - return null; - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarFileTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarFileTest.java deleted file mode 100644 index 10a6a67..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarFileTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
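The deleted IT's concurrency scaffolding is a reusable pattern: start each Callable in its own thread behind a FutureTask, then join and rethrow failures via get(). Reduced to a self-contained shape, with an AtomicInteger standing in for the contended node-store merges:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.AtomicInteger;

// The run()/FutureTask fan-out used by SegmentReferenceLimitTestIT; the
// AtomicInteger stands in for the contended SegmentNodeStore merges.
public class ConcurrentWorkerSketch {
    private static <T> FutureTask<T> run(Callable<T> callable) {
        FutureTask<T> task = new FutureTask<T>(callable);
        new Thread(task).start();
        return task;
    }

    public static void main(String[] args) throws Exception {
        final AtomicInteger commits = new AtomicInteger();
        Callable<Void> worker = new Callable<Void>() {
            @Override
            public Void call() {
                for (int k = 0; k < 400; k++) {
                    commits.incrementAndGet(); // stand-in for one merge()
                }
                return null;
            }
        };
        List<FutureTask<Void>> tasks = new ArrayList<FutureTask<Void>>();
        for (int i = 0; i < 10; i++) {
            tasks.add(run(worker));
        }
        for (FutureTask<Void> task : tasks) {
            task.get(); // joins the worker and rethrows any failure
        }
        System.out.println(commits.get()); // 4000
    }
}
```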
- */ -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.base.Charsets.UTF_8; -import static junit.framework.Assert.assertEquals; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.UUID; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class TarFileTest { - - private File file; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - @Before - public void setUp() throws IOException { - file = folder.newFile(); - } - - @Test - public void testWriteAndRead() throws IOException { - UUID id = UUID.randomUUID(); - long msb = id.getMostSignificantBits(); - long lsb = id.getLeastSignificantBits() & (-1 >>> 4); // OAK-1672 - byte[] data = "Hello, World!".getBytes(UTF_8); - - TarWriter writer = new TarWriter(file); - try { - writer.writeEntry(msb, lsb, data, 0, data.length); - assertEquals(ByteBuffer.wrap(data), writer.readEntry(msb, lsb)); - } finally { - writer.close(); - } - - assertEquals(4096, file.length()); - - TarReader reader = TarReader.open(file, false); - try { - assertEquals(ByteBuffer.wrap(data), reader.readEntry(msb, lsb)); - } finally { - reader.close(); - } - - reader = TarReader.open(file, false); - try { - assertEquals(ByteBuffer.wrap(data), reader.readEntry(msb, lsb)); - } finally { - reader.close(); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriterTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriterTest.java deleted file mode 100644 index ae33145..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/file/TarWriterTest.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
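Both tar tests address entries by the two halves of a segment UUID; TarWriterTest below reconstructs them the same way in newUUID. The split round-trips exactly:

```java
import java.util.UUID;

// Tar entries are keyed by (msb, lsb); the pair reconstructs the original
// UUID exactly, which is what newUUID in the next test relies on.
public class UuidHalvesSketch {
    public static void main(String[] args) {
        UUID id = UUID.randomUUID();
        long msb = id.getMostSignificantBits();
        long lsb = id.getLeastSignificantBits();
        System.out.println(id.equals(new UUID(msb, lsb))); // true
    }
}
```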
- */ - -package org.apache.jackrabbit.oak.plugins.segment.file; - -import static com.google.common.collect.Maps.newHashMap; -import static com.google.common.collect.Sets.newHashSet; -import static java.nio.ByteBuffer.allocate; -import static java.util.Collections.singleton; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentVersion.V_11; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import com.google.common.collect.ImmutableList; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentWriter; -import org.apache.jackrabbit.oak.plugins.segment.file.TarWriterTest.SegmentGraphBuilder.Node; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -public class TarWriterTest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(new File("target")); - - /** - * Regression test for OAK-2800 - */ - @Test - public void collectReferences() throws IOException { - SegmentGraphBuilder graphBuilder = new SegmentGraphBuilder(); - - // a -> b -> c - Node c = graphBuilder.createNode("c"); - Node b = graphBuilder.createNode("b", c); - Node a = graphBuilder.createNode("a", b); - - Node n = graphBuilder.createNode("n"); - - // y -> z - Node z = graphBuilder.createNode("z"); - Node y = graphBuilder.createNode("y", z); - - assertEquals(singleton(b), a.getReferences()); - assertEquals(singleton(c), b.getReferences()); - assertTrue(c.getReferences().isEmpty()); - assertEquals(singleton(z), y.getReferences()); - assertTrue(z.getReferences().isEmpty()); - - File tar = folder.newFile(getClass().getName() + ".tar"); - TarWriter tarWriter = new TarWriter(tar); - try { - y.write(tarWriter); - b.write(tarWriter); - a.write(tarWriter); - n.write(tarWriter); - - Set<UUID> references = newHashSet(); - references.add(a.getUUID()); - tarWriter.collectReferences(references); - assertEquals( - c + " must be in references as " + a + " has a transitive reference to " + c + " through " + b + ", " + - a + " must not be in references as " + a + " is in the TarWriter, " + - "no other elements must be in references.", - singleton(c), toNodes(graphBuilder, references)); - - references.clear(); - references.add(b.getUUID()); - tarWriter.collectReferences(references); - assertEquals( - c + " must be in references as " + b + " has a direct reference to " + c + " and " + b + " is in the TarWriter, " + - b + " must not be in references as " + b + " is in the TarWriter, " + - "no other elements must be in references.", - singleton(c), toNodes(graphBuilder, references)); - - references.clear(); - references.add(y.getUUID()); - tarWriter.collectReferences(references); - assertEquals( - z + " must be in references as " + y + " has a direct reference to " + z + ", " + - y + " must not be in references as " + y + " is in the TarWriter, " + - "no other elements must be in references.", - singleton(z), toNodes(graphBuilder, references)); - - references.clear(); - references.add(c.getUUID()); - tarWriter.collectReferences(references); - assertEquals( - c + " must be in references as " + c + " is not in the TarWriter, " + -
"no other elements must be in references.", - singleton(c), toNodes(graphBuilder, references)); - - references.clear(); - references.add(z.getUUID()); - tarWriter.collectReferences(references); - assertEquals( - z + " must be in references as " + z + " is not in the TarWriter " + - "no other elements must be in references.", - singleton(z), toNodes(graphBuilder, references)); - - references.clear(); - references.add(n.getUUID()); - tarWriter.collectReferences(references); - assertTrue( - "references must be empty as " + n + " has no references " + - "and " + n + " is in the TarWriter", - references.isEmpty()); - } finally { - tarWriter.close(); - } - } - - private static Set toNodes(SegmentGraphBuilder graphBuilder, Set uuids) { - Set nodes = newHashSet(); - for (UUID uuid : uuids) { - nodes.add(graphBuilder.getNode(uuid)); - } - return nodes; - } - - public static class SegmentGraphBuilder { - private final Map segments = newHashMap(); - private final Map nodes = newHashMap(); - - private final SegmentStore store; - private final SegmentWriter writer; - - private int nextNodeNo; - - public SegmentGraphBuilder() throws IOException { - store = new MemoryStore() { - @Override - public void writeSegment(SegmentId id, byte[] data, int offset, int length) throws IOException { - super.writeSegment(id, data, offset, length); - ByteBuffer buffer = allocate(length); - buffer.put(data, offset, length); - buffer.rewind(); - segments.put(id, buffer); - } - }; - writer = new SegmentWriter(store, V_11, ""); - } - - public class Node { - final String name; - final RecordId selfId; - final byte[] data; - final Segment segment; - - Node(String name, RecordId selfId, ByteBuffer data) { - this.name = name; - this.selfId = selfId; - this.data = data.array(); - segment = new Segment(store.getTracker(), selfId.getSegmentId(), data); - } - - public void write(TarWriter tarWriter) throws IOException { - long msb = getSegmentId().getMostSignificantBits(); - long lsb = getSegmentId().getLeastSignificantBits(); - tarWriter.writeEntry(msb, lsb, data, 0, data.length); - } - - public UUID getUUID() { - return newUUID(getSegmentId()); - } - - private SegmentId getSegmentId() { - return selfId.getSegmentId(); - } - - public Set getReferences() { - Set references = newHashSet(); - for (SegmentId segmentId : segment.getReferencedIds()) { - references.add(nodes.get(newUUID(segmentId))); - } - references.remove(this); - return references; - } - - @Override - public String toString() { - return name; - } - - void addReference(SegmentWriter writer) throws IOException { - // Need to write a proper list as singleton lists are optimised - // to just returning the recordId of its single element - writer.writeList(ImmutableList.of(selfId, selfId)); - } - } - - public Node createNode(String name, Node... 
refs) throws IOException { - RecordId selfId = writer.writeString("id-" + nextNodeNo++); - for (Node ref : refs) { - ref.addReference(writer); - } - writer.flush(); - SegmentId segmentId = selfId.getSegmentId(); - Node node = new Node(name, selfId, segments.get(segmentId)); - nodes.put(newUUID(segmentId), node); - return node; - } - - public Node getNode(UUID uuid) { - return nodes.get(uuid); - } - - private static UUID newUUID(SegmentId segmentId) { - return new UUID(segmentId.getMostSignificantBits(), segmentId.getLeastSignificantBits()); - } - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/fixture/SegmentFixture.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/fixture/SegmentFixture.java deleted file mode 100644 index 9e07e1c..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/fixture/SegmentFixture.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.fixture; - -import java.io.IOException; - -import org.apache.jackrabbit.oak.fixture.NodeStoreFixture; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -@Deprecated -public class SegmentFixture extends NodeStoreFixture { - - private final SegmentStore store; - - public SegmentFixture() { - this(null); - } - - public SegmentFixture(SegmentStore store) { - this.store = store; - } - - @Override - public NodeStore createNodeStore() { - if (store == null) { - try { - return SegmentNodeStore.builder(new MemoryStore()).build(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } else { - return SegmentNodeStore.builder(store).build(); - } - } - - @Override - public String toString() { - return "SegmentNodeStore"; - } -} \ No newline at end of file diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/DepthFirstNodeIteratorTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/DepthFirstNodeIteratorTest.java deleted file mode 100644 index e7c0be8..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/DepthFirstNodeIteratorTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
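The collectReferences assertions above all follow one rule: a reference to a segment already contained in the TAR file under construction is expanded into that segment's own references (transitively), while references to external segments are kept as-is. A toy version of that closure, with string ids and the in-file set made explicit (not the real TarWriter code):

```java
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Toy closure matching the deleted collectReferences assertions: ids already
// in the tar file are expanded into their own references, external ids kept.
public class CollectReferencesSketch {
    static Set<String> collect(Set<String> refs, Set<String> inFile,
                               Map<String, List<String>> graph) {
        Set<String> external = new HashSet<String>();
        Set<String> seen = new HashSet<String>();
        Deque<String> queue = new ArrayDeque<String>(refs);
        while (!queue.isEmpty()) {
            String id = queue.remove();
            if (!seen.add(id)) {
                continue;
            }
            if (inFile.contains(id)) {
                List<String> out = graph.get(id);      // expand in-file segment
                queue.addAll(out == null ? Collections.<String>emptyList() : out);
            } else {
                external.add(id);                      // keep external reference
            }
        }
        return external;
    }

    public static void main(String[] args) {
        // a -> b -> c, with a and b already written to the tar file.
        Map<String, List<String>> graph = new HashMap<String, List<String>>();
        graph.put("a", Arrays.asList("b"));
        graph.put("b", Arrays.asList("c"));
        Set<String> inFile = new HashSet<String>(Arrays.asList("a", "b"));
        Set<String> refs = new HashSet<String>(Arrays.asList("a"));
        System.out.println(collect(refs, inFile, graph)); // [c]
    }
}
```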
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.migration; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.blob.migration.DepthFirstNodeIterator; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Before; -import org.junit.Test; - -public class DepthFirstNodeIteratorTest { - - private NodeStore store; - - @Before - public void setup() throws CommitFailedException, IOException { - store = SegmentNodeStore.builder(new MemoryStore()).build(); - NodeBuilder rootBuilder = store.getRoot().builder(); - NodeBuilder countries = rootBuilder.child("countries"); - countries.child("uk").child("cities").child("london").child("districts").child("frognal"); - countries.child("germany"); - countries.child("france").child("cities").child("paris"); - store.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - // The order of the returned nodes is not defined, that's why we have to - // create 3 subtrees. 
- @Test - public void testIterate() { - Map<String, String[]> subtrees = new HashMap<String, String[]>(); - subtrees.put("uk", new String[] { "cities", "london", "districts", "frognal" }); - subtrees.put("germany", new String[] {}); - subtrees.put("france", new String[] { "cities", "paris" }); - - DepthFirstNodeIterator iterator = new DepthFirstNodeIterator(store.getRoot()); - assertTrue(iterator.hasNext()); - assertEquals("countries", iterator.next().getName()); - - for (int i = 0; i < 3; i++) { - assertTrue(iterator.hasNext()); - String country = iterator.next().getName(); - for (String node : subtrees.remove(country)) { - assertTrue(iterator.hasNext()); - assertEquals(node, iterator.next().getName()); - } - } - assertFalse(iterator.hasNext()); - assertTrue(subtrees.isEmpty()); - } - - @Test - public void testGetPath() { - Map<String, String> nameToPath = new HashMap<String, String>(); - nameToPath.put("countries", "/countries"); - nameToPath.put("uk", "/countries/uk"); - nameToPath.put("frognal", "/countries/uk/cities/london/districts/frognal"); - nameToPath.put("paris", "/countries/france/cities/paris"); - - DepthFirstNodeIterator iterator = new DepthFirstNodeIterator(store.getRoot()); - while (iterator.hasNext()) { - String expectedPath = nameToPath.remove(iterator.next().getName()); - if (expectedPath == null) { - continue; - } - assertEquals(expectedPath, iterator.getPath()); - } - assertTrue(nameToPath.isEmpty()); - } -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/ExternalToExternalMigrationTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/ExternalToExternalMigrationTest.java deleted file mode 100644 index 281ce76..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/ExternalToExternalMigrationTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
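With sibling order unspecified, the deleted iterator test can only assert two properties: a parent is emitted before its descendants, and each subtree comes out contiguously. A toy depth-first traversal makes both visible:

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy DFS: parents precede descendants and each subtree is contiguous, the
// two properties the deleted DepthFirstNodeIteratorTest checks.
public class DepthFirstSketch {
    static List<String> dfs(String root, Map<String, List<String>> children) {
        List<String> out = new ArrayList<String>();
        Deque<String> stack = new ArrayDeque<String>();
        stack.push(root);
        while (!stack.isEmpty()) {
            String node = stack.pop();
            out.add(node);
            List<String> kids = children.get(node);
            for (String child : kids == null ? Collections.<String>emptyList() : kids) {
                stack.push(child);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, List<String>> children = new HashMap<String, List<String>>();
        children.put("countries", Arrays.asList("uk", "germany", "france"));
        children.put("uk", Arrays.asList("cities"));
        children.put("france", Arrays.asList("paris"));
        System.out.println(dfs("countries", children));
        // e.g. [countries, france, paris, germany, uk, cities]
    }
}
```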
- */ - -package org.apache.jackrabbit.oak.plugins.segment.migration; - -import java.io.File; -import java.io.IOException; - -import org.apache.jackrabbit.oak.plugins.blob.migration.AbstractMigratorTest; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.blob.FileBlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -public class ExternalToExternalMigrationTest extends AbstractMigratorTest { - - private SegmentStore segmentStore; - - @Override - protected NodeStore createNodeStore(BlobStore blobStore, File repository) throws IOException { - File segmentDir = new File(repository, "segmentstore"); - - try { - segmentStore = FileStore.builder(segmentDir).withBlobStore(blobStore).build(); - } catch (InvalidFileStoreVersionException e) { - throw new IllegalStateException(e); - } - - return SegmentNodeStore.builder(segmentStore).build(); - } - - @Override - protected void closeNodeStore() { - segmentStore.close(); - } - - @Override - protected BlobStore createOldBlobStore(File repository) { - return new FileBlobStore(repository.getPath() + "/old"); - } - - @Override - protected BlobStore createNewBlobStore(File repository) { - return new FileBlobStore(repository.getPath() + "/new"); - } - -} diff --git oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/SegmentToExternalMigrationTest.java oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/SegmentToExternalMigrationTest.java deleted file mode 100644 index d128afe..0000000 --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/migration/SegmentToExternalMigrationTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.migration; - -import java.io.File; -import java.io.IOException; - -import org.apache.jackrabbit.oak.plugins.blob.migration.AbstractMigratorTest; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.file.InvalidFileStoreVersionException; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.apache.jackrabbit.oak.spi.blob.FileBlobStore; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -public class SegmentToExternalMigrationTest extends AbstractMigratorTest { - - private SegmentStore segmentStore; - - @Override - protected NodeStore createNodeStore(BlobStore blobStore, File repository) throws IOException { - File segmentDir = new File(repository, "segmentstore"); - FileStore.Builder builder = FileStore.builder(segmentDir); - if (blobStore != null) { - builder.withBlobStore(blobStore); - } - - try { - segmentStore = builder.build(); - } catch (InvalidFileStoreVersionException e) { - throw new IllegalStateException(e); - } - - return SegmentNodeStore.builder(segmentStore).build(); - } - - @Override - protected void closeNodeStore() { - segmentStore.close(); - } - - @Override - protected BlobStore createOldBlobStore(File repository) { - return null; - } - - @Override - protected BlobStore createNewBlobStore(File repository) { - return new FileBlobStore(repository.getPath() + "/new"); - } - -} diff --git oak-segment/src/test/resources/aws.properties oak-segment/src/test/resources/aws.properties deleted file mode 100644 index 03cd919..0000000 --- oak-segment/src/test/resources/aws.properties +++ /dev/null @@ -1,38 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
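Both migration tests wire an optional BlobStore into the segment store before building the node store. Ported to the oak-segment-tar builders used elsewhere in this change, the conditional wiring would look roughly like this (a sketch; the surrounding AbstractMigratorTest contract is unchanged):

```java
import java.io.File;

import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.spi.state.NodeStore;

// Sketch of createNodeStore ported to oak-segment-tar; the BlobStore stays
// optional, exactly as in the deleted SegmentToExternalMigrationTest.
public class MigrationStoreSketch {
    static NodeStore createNodeStore(BlobStore blobStore, File repository) throws Exception {
        FileStoreBuilder builder =
                FileStoreBuilder.fileStoreBuilder(new File(repository, "segmentstore"));
        if (blobStore != null) {
            builder.withBlobStore(blobStore);
        }
        FileStore segmentStore = builder.build();
        return SegmentNodeStoreBuilders.builder(segmentStore).build();
    }
}
```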
-# - -# AWS account ID -accessKey= -# AWS secret key -secretKey= -# AWS bucket name -s3Bucket=oakblobstore -# AWS bucket region -# Mapping of S3 regions to their constants -# US Standard us-standard -# US West us-west-2 -# US West (Northern California) us-west-1 -# EU (Ireland) EU -# Asia Pacific (Singapore) ap-southeast-1 -# Asia Pacific (Sydney) ap-southeast-2 -# Asia Pacific (Tokyo) ap-northeast-1 -# South America (Sao Paulo) sa-east-1 -s3Region=us-standard -connectionTimeout=120000 -socketTimeout=120000 -maxConnections=10 -maxErrorRetry=10 diff --git oak-segment/src/test/resources/logback-test.xml oak-segment/src/test/resources/logback-test.xml deleted file mode 100644 index 7a9abc4..0000000 --- oak-segment/src/test/resources/logback-test.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - %date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L) %msg%n - - - - - target/unit-tests.log - - %date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L) %msg%n - - - - - - - - - diff --git oak-segment/src/test/resources/logging.properties oak-segment/src/test/resources/logging.properties deleted file mode 100644 index 811faee..0000000 --- oak-segment/src/test/resources/logging.properties +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -handlers = org.slf4j.bridge.SLF4JBridgeHandler diff --git oak-segment/src/test/resources/org/apache/jackrabbit/oak/plugins/segment/file-store.zip oak-segment/src/test/resources/org/apache/jackrabbit/oak/plugins/segment/file-store.zip deleted file mode 100644 index fd716eb..0000000 Binary files oak-segment/src/test/resources/org/apache/jackrabbit/oak/plugins/segment/file-store.zip and /dev/null differ diff --git oak-solr-core/pom.xml oak-solr-core/pom.xml index e72cc80..8eecfcb 100644 --- oak-solr-core/pom.xml +++ oak-solr-core/pom.xml @@ -115,12 +115,6 @@ org.apache.jackrabbit - oak-segment - ${project.version} - test - - - org.apache.jackrabbit oak-segment-tar ${project.version} test diff --git oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/SolrBaseTest.java oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/SolrBaseTest.java index 5c167c0..a9170ac 100644 --- oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/SolrBaseTest.java +++ oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/SolrBaseTest.java @@ -29,8 +29,8 @@ import org.apache.jackrabbit.oak.plugins.index.solr.index.SolrIndexEditorProvide import org.apache.jackrabbit.oak.plugins.index.solr.query.SolrQueryIndexProvider; import org.apache.jackrabbit.oak.plugins.index.solr.util.SolrIndexInitializer; import org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.memory.MemoryStore; import org.apache.jackrabbit.oak.spi.commit.EditorHook; import org.apache.jackrabbit.oak.spi.security.OpenSecurityProvider; import org.apache.jackrabbit.oak.spi.state.NodeStore; @@ -52,7 +52,7 @@ public abstract class SolrBaseTest { @Before public void setUp() throws Exception { - store = SegmentNodeStore.builder(new MemoryStore()).build(); + store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build(); provider = new TestUtils(); server = provider.getSolrServer(); configuration = provider.getConfiguration(); diff --git oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/configuration/nodestate/OakSolrNodeStateConfigurationTest.java oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/configuration/nodestate/OakSolrNodeStateConfigurationTest.java index bbfb61e..e649af0 100644 --- oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/configuration/nodestate/OakSolrNodeStateConfigurationTest.java +++ oak-solr-core/src/test/java/org/apache/jackrabbit/oak/plugins/index/solr/configuration/nodestate/OakSolrNodeStateConfigurationTest.java @@ -16,8 +16,11 @@ */ package org.apache.jackrabbit.oak.plugins.index.solr.configuration.nodestate; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.memory.MemoryStore; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders; +import org.apache.jackrabbit.oak.segment.memory.MemoryStore; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.EmptyHook; import org.apache.jackrabbit.oak.spi.state.NodeBuilder; @@ -26,9 +29,6 @@ import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.junit.Before; import 
org.junit.Test; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; - /** * Tests for {@link org.apache.jackrabbit.oak.plugins.index.solr.configuration.nodestate.OakSolrNodeStateConfiguration} */ @@ -38,7 +38,7 @@ public class OakSolrNodeStateConfigurationTest { @Before public void setUp() throws Exception { - store = SegmentNodeStore.builder(new MemoryStore()).build(); + store = SegmentNodeStoreBuilders.builder(new MemoryStore()).build(); NodeBuilder builder = store.getRoot().builder(); builder.setProperty("a", 1) .setProperty("b", 2) diff --git oak-tarmk-standby/osgi-conf/primary/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config oak-tarmk-standby/osgi-conf/primary/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config deleted file mode 100644 index 097cd24..0000000 --- oak-tarmk-standby/osgi-conf/primary/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config +++ /dev/null @@ -1,5 +0,0 @@ -org.apache.sling.installer.configuration.persist=B"false" -mode="primary" -port=I"8023" -primary.allowed-client-ip-ranges=["0.0.0.0-255.255.255.255"] -secure=B"false" \ No newline at end of file diff --git oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.config oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.config deleted file mode 100644 index 1c083b6..0000000 --- oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.config +++ /dev/null @@ -1,3 +0,0 @@ -name="Oak-Tar" -service.ranking=I"100" -standby=B"true" \ No newline at end of file diff --git oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config deleted file mode 100644 index a833ab3..0000000 --- oak-tarmk-standby/osgi-conf/standby/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config +++ /dev/null @@ -1,6 +0,0 @@ -org.apache.sling.installer.configuration.persist=B"false" -mode="standby" -primary.host="127.0.0.1" -port=I"8023" -secure=B"false" -interval=I"5" \ No newline at end of file diff --git oak-tarmk-standby/pom.xml oak-tarmk-standby/pom.xml deleted file mode 100644 index 4eeb749..0000000 --- oak-tarmk-standby/pom.xml +++ /dev/null @@ -1,237 +0,0 @@ - - - - - - 4.0.0 - - - org.apache.jackrabbit - oak-parent - 1.8-SNAPSHOT - ../oak-parent/pom.xml - - - oak-tarmk-standby - Oak TarMK Standby - bundle - Oak TarMK standby module - - - 4.0.23.Final - - - - - - org.apache.felix - maven-bundle-plugin - - - - com.google.protobuf.*;resolution:=optional, - com.jcraft.jzlib.*;resolution:=optional, - javassist.*;resolution:=optional, - org.apache.tomcat.jni.*;resolution:=optional, - org.bouncycastle.*;resolution:=optional, - org.eclipse.jetty.npn.*;resolution:=optional, - org.jboss.marshalling.*;resolution:=optional, - sun.misc.*;resolution:=optional, - sun.nio.ch.*;resolution:=optional, - sun.security.util.*;resolution:=optional, - sun.security.x509.*;resolution:=optional, - * - - netty-*;inline=true - !org.apache.jackrabbit.oak.plugins.segment.standby.* - - - - - org.apache.felix - maven-scr-plugin - - - org.codehaus.mojo - build-helper-maven-plugin - 1.9 - - - reserve-network-port - - reserve-network-port - - process-test-resources - - - standby.server.port - 
standby.proxy.port - - - - - - - maven-surefire-plugin - - - ${standby.server.port} - ${standby.proxy.port} - - - - **/BulkTest.java - **/MBeanTest.java - **/FailoverIPRangeTest.java - - - - - maven-failsafe-plugin - - - ${standby.server.port} - ${standby.proxy.port} - - - - - org.apache.rat - apache-rat-plugin - - - **/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config - **/org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.config - **/org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.config - - - - - - - - - - org.osgi - org.osgi.core - provided - - - org.osgi - org.osgi.compendium - provided - - - biz.aQute.bnd - bndlib - provided - - - org.apache.felix - org.apache.felix.scr.annotations - provided - - - - org.apache.jackrabbit - oak-core - ${project.version} - provided - - - org.apache.jackrabbit - oak-segment - ${project.version} - provided - - - org.apache.jackrabbit - oak-blob - ${project.version} - provided - - - - io.netty - netty-common - ${netty-version} - provided - - - io.netty - netty-buffer - ${netty-version} - provided - - - io.netty - netty-transport - ${netty-version} - provided - - - io.netty - netty-codec - ${netty-version} - provided - - - io.netty - netty-handler - ${netty-version} - provided - - - - - org.slf4j - slf4j-api - - - - - com.google.code.findbugs - jsr305 - - - - - org.apache.jackrabbit - oak-commons - tests - ${project.version} - test - - - junit - junit - test - - - org.apache.commons - commons-lang3 - 3.3.2 - test - - - ch.qos.logback - logback-classic - test - - - org.apache.jackrabbit - jackrabbit-data - ${jackrabbit.version} - test - - - diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/FailedRequestListener.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/FailedRequestListener.java deleted file mode 100644 index e98e69b..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/FailedRequestListener.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.client; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.util.concurrent.Promise; - -@Deprecated -public class FailedRequestListener implements ChannelFutureListener { - - private final Promise promise; - - @Deprecated - public FailedRequestListener(Promise promise) { - this.promise = promise; - } - - @Override - @Deprecated - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - promise.setFailure(future.cause()); - future.channel().close(); - } else { - future.channel().read(); - } - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java deleted file mode 100644 index 3eb5a3d..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/SegmentLoaderHandler.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
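For reference, the SegmentLoaderHandler deleted below decouples Netty's I/O thread from a dedicated sync thread: replies are offer()ed onto a BlockingQueue from the event loop, and the sync executor poll()s them with the configured read timeout. A stripped-down sketch of that handoff pattern, with illustrative names:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ReplyHandoff {
    private final BlockingQueue<Object> replies = new LinkedBlockingQueue<>();

    // Runs on the Netty I/O thread: never block here, just hand the reply over.
    public void onReply(Object reply) {
        replies.offer(reply);
    }

    // Runs on the sync thread: block up to the read timeout; null signals a timeout.
    public Object awaitReply(long readTimeoutMs) throws InterruptedException {
        return replies.poll(readTimeoutMs, TimeUnit.MILLISECONDS);
    }
}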
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.client; - -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; -import static org.apache.jackrabbit.oak.plugins.segment.standby.codec.Messages.newGetBlobReq; -import static org.apache.jackrabbit.oak.plugins.segment.standby.codec.Messages.newGetSegmentReq; - -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeBuilder; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNotFoundException; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.SegmentReply; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.RemoteSegmentLoader; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public class SegmentLoaderHandler extends ChannelInboundHandlerAdapter implements RemoteSegmentLoader { - - private static final Logger log = LoggerFactory.getLogger(SegmentLoaderHandler.class); - - private final StandbyStore store; - private final String clientID; - private final RecordId head; - private final AtomicBoolean running; - private final int readTimeoutMs; - private final boolean autoClean; - - private volatile ChannelHandlerContext ctx; - - private final BlockingQueue segment = new LinkedBlockingQueue(); - - // Use a separate thread for sync'ing. Leave the I/O thread free to process - // I/O requests. - private ExecutorService syncExecutor; - - @Deprecated - public SegmentLoaderHandler(StandbyStore store, RecordId head, String clientID, AtomicBoolean running, int readTimeoutMs, boolean autoClean) { - this.store = store; - this.head = head; - this.clientID = clientID; - this.running = running; - this.readTimeoutMs = readTimeoutMs; - this.autoClean = autoClean; - this.syncExecutor = Executors.newSingleThreadExecutor(); - } - - @Override - @Deprecated - public void handlerAdded(ChannelHandlerContext ctx) throws Exception { - this.ctx = ctx; - } - - @Override - @Deprecated - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - log.error("Exception caught, closing channel.", cause); - close(); - } - - @Override - @Deprecated - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - syncExecutor.shutdown(); - syncExecutor = null; - } - - @Override - @Deprecated - public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { - if (evt instanceof SegmentReply) { - onSegmentReply((SegmentReply) evt); - } - - if (evt instanceof String) { - onCommand((String) evt); - } - } - - private void onSegmentReply(SegmentReply reply) { - // Offer the reply from the I/O thread, unblocking the sync thread. 
- segment.offer(reply); - } - - private void onCommand(String command) { - if (command.equals("sync")) { - syncExecutor.submit(new Runnable() { - - @Override - public void run() { - sync(); - } - - }); - } - } - - private void sync() { - log.debug("new head id " + head); - long t = System.currentTimeMillis(); - long preSyncSize = -1; - if (autoClean) { - preSyncSize = store.size(); - } - - try { - store.preSync(this); - SegmentNodeState before = store.getHead(); - SegmentNodeBuilder builder = before.builder(); - - SegmentNodeState current = new SegmentNodeState(head); - do { - try { - current.compareAgainstBaseState(before, - new StandbyApplyDiff(builder, store, this)); - break; - } catch (SegmentNotFoundException e) { - // the segment is locally damaged or not present anymore - // lets try to read this from the primary again - String id = e.getSegmentId(); - Segment s = readSegment(e.getSegmentId()); - if (s == null) { - log.warn("can't read locally corrupt segment " + id + " from primary"); - throw e; - } - - log.debug("did reread locally corrupt segment " + id + " with size " + s.size()); - store.persist(s.getSegmentId(), s); - } - } while(true); - boolean ok = store.setHead(before, builder.getNodeState()); - log.debug("updated head state successfully: {} in {}ms.", ok, - System.currentTimeMillis() - t); - - if (autoClean && preSyncSize > 0) { - long postSyncSize = store.size(); - // if size gain is over 25% call cleanup - if (postSyncSize - preSyncSize > 0.25 * preSyncSize) { - log.info( - "Store size increased from {} to {}, will run cleanup.", - humanReadableByteCount(preSyncSize), - humanReadableByteCount(postSyncSize)); - store.cleanup(); - } - } - } finally { - store.postSync(); - close(); - } - } - - @Override - @Deprecated - public Segment readSegment(final String id) { - // Use the I/O thread to write the request to the server - ctx.writeAndFlush(newGetSegmentReq(this.clientID, id)); - // Wait on the sync thread for the response. - return getSegment(id); - } - - @Override - @Deprecated - public Blob readBlob(String blobId) { - // Use the I/O thread to write the request to the server - ctx.writeAndFlush(newGetBlobReq(this.clientID, blobId)); - // Wait on the sync thread for the response. - return getBlob(blobId); - } - - private Segment getSegment(final String id) { - return getReply(id, SegmentReply.SEGMENT).getSegment(); - } - - private Blob getBlob(final String id) { - return getReply(id, SegmentReply.BLOB).getBlob(); - } - - private SegmentReply getReply(final String id, int type) { - boolean interrupted = false; - try { - for (;;) { - try { - // Block the sync thread for a response from the server. 
- SegmentReply r = segment.poll(readTimeoutMs, TimeUnit.MILLISECONDS); - - if (r == null) { - log.warn("timeout waiting for {}", id); - return SegmentReply.empty(); - } - - if (r.getType() == type) { - switch (r.getType()) { - case SegmentReply.SEGMENT: - if (r.getSegment().getSegmentId().toString() - .equals(id)) { - return r; - } - break; - case SegmentReply.BLOB: - if (r.getBlob().getBlobId().equals(id)) { - return r; - } - break; - } - } - } catch (InterruptedException ignore) { - interrupted = true; - } - } - } finally { - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - - @Override - @Deprecated - public void close() { - ctx.close(); - } - - @Override - @Deprecated - public boolean isClosed() { - return !ctx.channel().isActive(); - } - - @Override - @Deprecated - public boolean isRunning() { - return running.get(); - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java deleted file mode 100644 index f712d03..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.client; - -import static org.apache.jackrabbit.oak.api.Type.BINARIES; -import static org.apache.jackrabbit.oak.api.Type.BINARY; - -import java.io.IOException; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.RemoteSegmentLoader; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStateDiff; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -class StandbyApplyDiff implements NodeStateDiff { - - private static final Logger log = LoggerFactory - .getLogger(StandbyApplyDiff.class); - - private final NodeBuilder builder; - - private final SegmentStore store; - - private final boolean hasDataStore; - - private final RemoteSegmentLoader loader; - - private final String path; - - /** - * read-only traversal of the diff that has 2 properties: one is to log all - * the content changes, second is to drill down to properly level, so that - * missing binaries can be sync'ed if needed - */ - private final boolean logOnly; - - public StandbyApplyDiff(NodeBuilder builder, SegmentStore store, - RemoteSegmentLoader loader) { - this(builder, store, loader, "/", false); - } - - private StandbyApplyDiff(NodeBuilder builder, SegmentStore store, - RemoteSegmentLoader loader, String path, boolean logOnly) { - this.builder = builder; - this.store = store; - this.hasDataStore = store.getBlobStore() != null; - this.loader = loader; - this.path = path; - this.logOnly = logOnly; - } - - @Override - public boolean propertyAdded(PropertyState after) { - if (!loader.isRunning()) { - return false; - } - if (!logOnly) { - builder.setProperty(binaryCheck(after)); - } else { - binaryCheck(after); - } - return true; - } - - @Override - public boolean propertyChanged(PropertyState before, PropertyState after) { - if (!loader.isRunning()) { - return false; - } - if (!logOnly) { - builder.setProperty(binaryCheck(after)); - } else { - binaryCheck(after); - } - return true; - } - - @Override - public boolean propertyDeleted(PropertyState before) { - if (!loader.isRunning()) { - return false; - } - if (!logOnly) { - builder.removeProperty(before.getName()); - } - return true; - } - - private PropertyState binaryCheck(PropertyState property) { - Type type = property.getType(); - if (type == BINARY) { - binaryCheck(property.getValue(Type.BINARY), property.getName()); - } else if (type == BINARIES) { - for (Blob blob : property.getValue(BINARIES)) { - binaryCheck(blob, property.getName()); - } - } - return property; - } - - private void binaryCheck(Blob b, String pName) { - if (b instanceof SegmentBlob) { - SegmentBlob sb = (SegmentBlob) b; - // verify if the blob exists - if (sb.isExternal() && hasDataStore && b.getReference() == null) { - String blobId = sb.getBlobId(); - if (blobId != null) { - readBlob(blobId, pName); - } - } - } else { - log.warn("Unknown Blob {} at {}, ignoring", b.getClass().getName(), - path + "#" + pName); - } - } - - private void readBlob(String blobId, String 
pName) { - Blob read = loader.readBlob(blobId); - if (read != null) { - try { - store.getBlobStore().writeBlob(read.getNewStream()); - } catch (IOException f) { - throw new IllegalStateException("Unable to persist blob " - + blobId + " at " + path + "#" + pName, f); - } - } else { - throw new IllegalStateException("Unable to load remote blob " - + blobId + " at " + path + "#" + pName); - } - } - - @Override - public boolean childNodeAdded(String name, NodeState after) { - return process(name, "childNodeAdded", EmptyNodeState.EMPTY_NODE, - after); - } - - @Override - public boolean childNodeChanged(String name, NodeState before, - NodeState after) { - try { - return process(name, "childNodeChanged", before, after); - } catch (RuntimeException e) { - log.trace("Check binaries for node {} and retry to process childNodeChanged", name); - // Attempt to load the binaries and retry, see OAK-4969 - for (PropertyState propertyState : after.getProperties()) { - binaryCheck(propertyState); - } - return process(name, "childNodeChanged", before, after); - } - } - - private boolean process(String name, String op, NodeState before, - NodeState after) { - if (!loader.isRunning()) { - return false; - } - if (after instanceof SegmentNodeState) { - if (log.isTraceEnabled()) { - log.trace("{} {}, readonly binary check {}", op, path + name, - logOnly); - } - if (!logOnly) { - RecordId id = ((SegmentNodeState) after).getRecordId(); - builder.setChildNode(name, new SegmentNodeState(id)); - } - if ("checkpoints".equals(name)) { - // if we're on the /checkpoints path, there's no need for a deep - // traversal to verify binaries - return true; - } - if (hasDataStore) { - // has external datastore, we need a deep - // traversal to verify binaries - return after.compareAgainstBaseState(before, - new StandbyApplyDiff(builder.getChildNode(name), store, - loader, path + name + "/", true)); - } else { - return true; - } - } - return false; - } - - @Override - public boolean childNodeDeleted(String name, NodeState before) { - if (!loader.isRunning()) { - return false; - } - log.trace("childNodeDeleted {}, RO:{}", path + name, logOnly); - if (!logOnly) { - builder.getChildNode(name).remove(); - } - return true; - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClient.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClient.java deleted file mode 100644 index 1046cb4..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClient.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.client; - -import java.io.Closeable; -import java.lang.management.ManagementFactory; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import javax.management.MBeanServer; -import javax.management.ObjectName; -import javax.management.StandardMBean; -import javax.net.ssl.SSLException; - -import io.netty.bootstrap.Bootstrap; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.handler.codec.compression.SnappyFramedDecoder; -import io.netty.handler.codec.string.StringEncoder; -import io.netty.handler.ssl.SslContext; -import io.netty.handler.ssl.util.InsecureTrustManagerFactory; -import io.netty.handler.timeout.ReadTimeoutHandler; -import io.netty.util.CharsetUtil; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.RecordIdDecoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.ClientStandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.StandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.CommunicationObserver; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public final class StandbyClient implements ClientStandbyStatusMBean, Runnable, Closeable { - @Deprecated - public static final String CLIENT_ID_PROPERTY_NAME = "standbyID"; - - private static final Logger log = LoggerFactory - .getLogger(StandbyClient.class); - - private final String host; - private final int port; - private final int readTimeoutMs; - private final boolean autoClean; - - private final StandbyStore store; - private final CommunicationObserver observer; - private StandbyClientHandler handler; - private EventLoopGroup group; - private SslContext sslContext; - private boolean active = false; - private int failedRequests; - private long lastSuccessfulRequest; - private volatile String state; - private final Object sync = new Object(); - - private final AtomicBoolean running = new AtomicBoolean(true); - - private long syncStartTimestamp; - private long syncEndTimestamp; - - @Deprecated - public StandbyClient(String host, int port, SegmentStore store, - boolean secure, int readTimeoutMs, boolean autoClean) - throws SSLException { - this.state = STATUS_INITIALIZING; - this.lastSuccessfulRequest = -1; - this.syncStartTimestamp = -1; - this.syncEndTimestamp = -1; - this.failedRequests = 0; - this.host = host; - this.port = port; - if (secure) { - this.sslContext = SslContext.newClientContext(InsecureTrustManagerFactory.INSTANCE); - } - this.readTimeoutMs = readTimeoutMs; - this.autoClean = autoClean; - this.store = new StandbyStore(store); - String s = System.getProperty(CLIENT_ID_PROPERTY_NAME); - this.observer = new CommunicationObserver((s == null || s.length() == 0) ? 
UUID.randomUUID().toString() : s); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.registerMBean(new StandardMBean(this, ClientStandbyStatusMBean.class), new ObjectName(this.getMBeanName())); - } - catch (Exception e) { - log.error("can register standby status mbean", e); - } - } - - @Deprecated - public String getMBeanName() { - return StandbyStatusMBean.JMX_NAME + ",id=\"" + this.observer.getID() + "\""; - } - - @Deprecated - public void close() { - stop(); - state = STATUS_CLOSING; - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.unregisterMBean(new ObjectName(this.getMBeanName())); - } - catch (Exception e) { - log.error("can unregister standby status mbean", e); - } - observer.unregister(); - shutdownNetty(); - state = STATUS_CLOSED; - } - - @Deprecated - public void run() { - if (!isRunning()) { - // manually stopped - return; - } - - Bootstrap b; - synchronized (this.sync) { - if (this.active) { - return; - } - state = STATUS_STARTING; - handler = new StandbyClientHandler(this.store, observer, running, - readTimeoutMs, autoClean); - group = new NioEventLoopGroup(); - - b = new Bootstrap(); - b.group(group); - b.channel(NioSocketChannel.class); - b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, readTimeoutMs); - b.option(ChannelOption.TCP_NODELAY, true); - b.option(ChannelOption.SO_REUSEADDR, true); - b.option(ChannelOption.SO_KEEPALIVE, true); - - b.handler(new ChannelInitializer() { - @Override - public void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline p = ch.pipeline(); - if (sslContext != null) { - p.addLast(sslContext.newHandler(ch.alloc())); - } - p.addLast("readTimeoutHandler", new ReadTimeoutHandler( - readTimeoutMs, TimeUnit.MILLISECONDS)); - p.addLast(new StringEncoder(CharsetUtil.UTF_8)); - p.addLast(new SnappyFramedDecoder(true)); - p.addLast(new RecordIdDecoder(store)); - p.addLast(handler); - } - }); - state = STATUS_RUNNING; - this.active = true; - } - - try { - long startTimestamp = System.currentTimeMillis(); - // Start the client. - ChannelFuture f = b.connect(host, port).sync(); - // Wait until the connection is closed. 
- f.channel().closeFuture().sync(); - this.failedRequests = 0; - this.syncStartTimestamp = startTimestamp; - this.syncEndTimestamp = System.currentTimeMillis(); - this.lastSuccessfulRequest = syncEndTimestamp / 1000; - } catch (Exception e) { - this.failedRequests++; - log.error("Failed synchronizing state.", e); - } finally { - synchronized (this.sync) { - this.active = false; - shutdownNetty(); - } - } - } - - private void shutdownNetty() { - if (handler != null) { - handler.close(); - handler = null; - } - if (group != null && !group.isShuttingDown()) { - group.shutdownGracefully(0, 1, TimeUnit.SECONDS).syncUninterruptibly(); - group = null; - } - } - - @Override - @Deprecated - public String getMode() { - return "client: " + this.observer.getID(); - } - - @Override - @Deprecated - public boolean isRunning() { - return running.get(); - } - - @Override - @Deprecated - public void start() { - running.set(true); - state = STATUS_RUNNING; - } - - @Override - @Deprecated - public void stop() { - running.set(false); - state = STATUS_STOPPED; - } - - @Override - @Deprecated - public String getStatus() { - return this.state; - } - - @Override - @Deprecated - public int getFailedRequests() { - return this.failedRequests; - } - - @Override - @Deprecated - public int getSecondsSinceLastSuccess() { - if (this.lastSuccessfulRequest < 0) return -1; - return (int)(System.currentTimeMillis() / 1000 - this.lastSuccessfulRequest); - } - - @Override - @Deprecated - public int calcFailedRequests() { - return this.getFailedRequests(); - } - - @Override - @Deprecated - public int calcSecondsSinceLastSuccess() { - return this.getSecondsSinceLastSuccess(); - } - - @Override - @Deprecated - public void cleanup() { - store.cleanup(); - } - - @Override - @Deprecated - public long getSyncStartTimestamp() { - return syncStartTimestamp; - } - - @Override - @Deprecated - public long getSyncEndTimestamp() { - return syncEndTimestamp; - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClientHandler.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClientHandler.java deleted file mode 100644 index 80b61ea..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyClientHandler.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.client; - -import static org.apache.jackrabbit.oak.plugins.segment.standby.codec.Messages.newGetHeadReq; - -import java.io.Closeable; -import java.util.concurrent.atomic.AtomicBoolean; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.timeout.ReadTimeoutHandler; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.RecordIdDecoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.ReplyDecoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.CommunicationObserver; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public class StandbyClientHandler extends SimpleChannelInboundHandler implements Closeable { - - private static final Logger log = LoggerFactory - .getLogger(StandbyClientHandler.class); - - private final StandbyStore store; - private final CommunicationObserver observer; - private final AtomicBoolean running; - private final int readTimeoutMs; - private final boolean autoClean; - - @Deprecated - public StandbyClientHandler(final StandbyStore store, CommunicationObserver observer, AtomicBoolean running, int readTimeoutMs, boolean autoClean) { - this.store = store; - this.observer = observer; - this.running = running; - this.readTimeoutMs = readTimeoutMs; - this.autoClean = autoClean; - } - - @Override - @Deprecated - public void channelActive(ChannelHandlerContext ctx) throws Exception { - log.debug("sending head request"); - ctx.writeAndFlush(newGetHeadReq(this.observer.getID())); - log.debug("did send head request"); - } - - @Override - @Deprecated - protected void channelRead0(ChannelHandlerContext ctx, RecordId msg) throws Exception { - setHead(ctx, msg); - }; - - @Override - @Deprecated - public void channelReadComplete(ChannelHandlerContext ctx) { - ctx.flush(); - } - - synchronized void setHead(ChannelHandlerContext ctx, RecordId head) { - if (store.getHead().getRecordId().equals(head)) { - // all sync'ed up - log.debug("no changes on sync."); - return; - } - - log.debug("updating current head to " + head); - ctx.pipeline().remove(ReadTimeoutHandler.class); - ctx.pipeline().remove(RecordIdDecoder.class); - ctx.pipeline().remove(this); - ctx.pipeline().addLast(new ReplyDecoder(store)); - - ctx.pipeline().addLast(new SegmentLoaderHandler(store, head, this.observer.getID(), running, readTimeoutMs, autoClean)); - ctx.pipeline().fireUserEventTriggered("sync"); - } - - @Override - @Deprecated - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - log.error("Exception caught, closing channel.", cause); - close(); - } - - @Override - @Deprecated - public void close() { - // This handler doesn't own resources to release - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/BlobEncoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/BlobEncoder.java deleted file mode 100644 index 007305a..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/BlobEncoder.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToByteEncoder; - -import java.io.InputStream; -import java.nio.charset.Charset; - -import org.apache.commons.io.IOUtils; -import org.apache.jackrabbit.oak.api.Blob; - -import com.google.common.hash.Hasher; -import com.google.common.hash.Hashing; - -@Deprecated -public class BlobEncoder extends MessageToByteEncoder { - - // TODO - // if transferring large binaries turns out to be too intensive look into - // using a ChunkedWriteHandler and a new ChunkedStream(Blob.getNewStream()) - - @Override - @Deprecated - protected void encode(ChannelHandlerContext ctx, Blob b, ByteBuf out) - throws Exception { - byte[] bytes = null; - InputStream s = b.getNewStream(); - try { - bytes = IOUtils.toByteArray(s); - } finally { - s.close(); - } - - Hasher hasher = Hashing.murmur3_32().newHasher(); - long hash = hasher.putBytes(bytes).hash().padToLong(); - - out.writeInt(bytes.length); - out.writeByte(Messages.HEADER_BLOB); - - String bid = b.getContentIdentity(); - byte[] id = bid.getBytes(Charset.forName("UTF-8")); - out.writeInt(id.length); - out.writeBytes(id); - - out.writeLong(hash); - out.writeBytes(bytes); - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/IdArrayBasedBlob.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/IdArrayBasedBlob.java deleted file mode 100644 index 24c8dbf..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/IdArrayBasedBlob.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import org.apache.jackrabbit.oak.plugins.memory.ArrayBasedBlob; - -@Deprecated -public class IdArrayBasedBlob extends ArrayBasedBlob { - - private final String blobId; - - @Deprecated - public IdArrayBasedBlob(byte[] value, String blobId) { - super(value); - this.blobId = blobId; - } - - @Deprecated - public String getBlobId() { - return blobId; - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/Messages.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/Messages.java deleted file mode 100644 index bbca3ec..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/Messages.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -@Deprecated -public class Messages { - - @Deprecated - public static final byte HEADER_RECORD = 0x00; - @Deprecated - public static final byte HEADER_SEGMENT = 0x01; - @Deprecated - public static final byte HEADER_BLOB = 0x02; - - @Deprecated - public static final String GET_HEAD = "h"; - @Deprecated - public static final String GET_SEGMENT = "s."; - @Deprecated - public static final String GET_BLOB = "b."; - - private static final String MAGIC = "Standby-CMD@"; - private static final String SEPARATOR = ":"; - - private static String newRequest(String clientID, String body) { - return MAGIC + (clientID == null ? 
"" : clientID.replace(SEPARATOR, "#")) + SEPARATOR + body + "\r\n"; - } - - @Deprecated - public static String newGetHeadReq(String clientID) { - return newRequest(clientID, GET_HEAD); - } - - @Deprecated - public static String newGetSegmentReq(String clientID, String sid) { - return newRequest(clientID, GET_SEGMENT + sid); - } - - @Deprecated - public static String newGetBlobReq(String clientID, String blobId) { - return newRequest(clientID, GET_BLOB + blobId); - } - - @Deprecated - public static String extractMessageFrom(String payload) { - if (payload.startsWith(MAGIC) && payload.length() > MAGIC.length()) { - int i = payload.indexOf(SEPARATOR); - return payload.substring(i + 1); - } - return null; - } - - @Deprecated - public static String extractClientFrom(String payload) { - if (payload.startsWith(MAGIC) && payload.length() > MAGIC.length()) { - payload = payload.substring(MAGIC.length()); - int i = payload.indexOf(SEPARATOR); - return payload.substring(0, i); - } - return null; - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdDecoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdDecoder.java deleted file mode 100644 index 3916600..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdDecoder.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import java.io.IOException; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; -import io.netty.util.CharsetUtil; - -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public class RecordIdDecoder extends LengthFieldBasedFrameDecoder { - - private static final Logger log = LoggerFactory - .getLogger(RecordIdDecoder.class); - - private final SegmentStore store; - - @Deprecated - public RecordIdDecoder(SegmentStore store) { - super(64, 0, 4, 0, 4); - this.store = store; - } - - @Override - @Deprecated - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) - throws Exception { - ByteBuf frame = (ByteBuf) super.decode(ctx, in); - if (frame == null) { - throw new IOException("Received unexpected empty frame. 
Maybe you have enabled secure transmission on only one endpoint of the connection."); - } - byte type = frame.readByte(); - frame.discardReadBytes(); - String id = frame.toString(CharsetUtil.UTF_8); - try { - log.debug("received type {} with id {}", type, id); - return RecordId.fromString(store.getTracker(), id); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(), e); - } - return null; - } - - @Override - @Deprecated - protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, - int index, int length) { - return buffer.slice(index, length); - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdEncoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdEncoder.java deleted file mode 100644 index 775ee34..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/RecordIdEncoder.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToByteEncoder; -import io.netty.util.CharsetUtil; - -import org.apache.jackrabbit.oak.plugins.segment.RecordId; - -@Deprecated -public class RecordIdEncoder extends MessageToByteEncoder { - - @Override - @Deprecated - protected void encode(ChannelHandlerContext ctx, RecordId msg, ByteBuf out) - throws Exception { - byte[] body = msg.toString().getBytes(CharsetUtil.UTF_8); - out.writeInt(body.length + 1); - out.writeByte(Messages.HEADER_RECORD); - out.writeBytes(body); - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/ReplyDecoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/ReplyDecoder.java deleted file mode 100644 index 69337c0..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/ReplyDecoder.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.ReplayingDecoder; - -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.util.List; -import java.util.UUID; - -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.ReplyDecoder.DecodingState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.hash.Hasher; -import com.google.common.hash.Hashing; - -@Deprecated -public class ReplyDecoder extends ReplayingDecoder { - - @Deprecated - public enum DecodingState { - @Deprecated - HEADER, - @Deprecated - SEGMENT, - @Deprecated - BLOB - } - - private static final Logger log = LoggerFactory - .getLogger(ReplyDecoder.class); - - private final SegmentStore store; - - private int length = -1; - private byte type = -1; - - @Deprecated - public ReplyDecoder(SegmentStore store) { - super(DecodingState.HEADER); - this.store = store; - } - - @Deprecated - private void reset() { - checkpoint(DecodingState.HEADER); - length = -1; - type = -1; - } - - @Override - @Deprecated - protected void decode(ChannelHandlerContext ctx, ByteBuf in, - List out) throws Exception { - - switch (state()) { - case HEADER: { - length = in.readInt(); - type = in.readByte(); - switch (type) { - case Messages.HEADER_SEGMENT: - checkpoint(DecodingState.SEGMENT); - break; - case Messages.HEADER_BLOB: - checkpoint(DecodingState.BLOB); - break; - default: - throw new Exception("Unknown type: " + type); - } - return; - } - - case SEGMENT: { - Segment s = decodeSegment(in, length, type); - if (s != null) { - out.add(SegmentReply.empty()); - ctx.fireUserEventTriggered(new SegmentReply(s)); - reset(); - } - return; - } - - case BLOB: { - IdArrayBasedBlob b = decodeBlob(in, length, type); - if (b != null) { - out.add(SegmentReply.empty()); - ctx.fireUserEventTriggered(new SegmentReply(b)); - reset(); - } - return; - } - - default: - throw new Exception("Unknown decoding state: " + state()); - } - } - - private Segment decodeSegment(ByteBuf in, int len, byte type) { - long msb = in.readLong(); - long lsb = in.readLong(); - long hash = in.readLong(); - - // #readBytes throws a 'REPLAY' exception if there are not enough bytes - // available for reading - ByteBuf data = in.readBytes(len - 25); - byte[] segment; - if (data.hasArray()) { - segment = data.array(); - } else { - segment = new byte[len - 25]; - in.readBytes(segment); - } - - Hasher hasher = Hashing.murmur3_32().newHasher(); - long check = hasher.putBytes(segment).hash().padToLong(); - if (hash == check) { - SegmentId id = new SegmentId(store.getTracker(), msb, lsb); - Segment s = new Segment(store.getTracker(), id, - ByteBuffer.wrap(segment)); - log.debug("received segment with id {} and size {}", id, s.size()); - return s; - } - log.debug("received corrupted segment {}, ignoring", new 
UUID(msb, lsb)); - return null; - } - - private IdArrayBasedBlob decodeBlob(ByteBuf in, int length, byte type) { - int inIdLen = in.readInt(); - byte[] bid = new byte[inIdLen]; - in.readBytes(bid); - String id = new String(bid, Charset.forName("UTF-8")); - - long hash = in.readLong(); - // #readBytes throws a 'REPLAY' exception if there are not enough bytes - // available for reading - ByteBuf data = in.readBytes(length); - byte[] blob; - if (data.hasArray()) { - blob = data.array(); - } else { - blob = new byte[length]; - data.readBytes(blob); - } - - Hasher hasher = Hashing.murmur3_32().newHasher(); - long check = hasher.putBytes(blob).hash().padToLong(); - if (hash == check) { - log.debug("received blob with id {} and size {}", id, blob.length); - return new IdArrayBasedBlob(blob, id); - } - log.debug("received corrupted binary {}, ignoring", id); - return null; - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentDecoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentDecoder.java deleted file mode 100644 index 8f10b09..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentDecoder.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import static org.apache.jackrabbit.oak.plugins.segment.standby.codec.SegmentEncoder.EXTRA_HEADERS_LEN; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.LengthFieldBasedFrameDecoder; - -import java.nio.ByteBuffer; -import java.util.UUID; - -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.hash.Hasher; -import com.google.common.hash.Hashing; - -@Deprecated -public class SegmentDecoder extends LengthFieldBasedFrameDecoder { - - private static final Logger log = LoggerFactory - .getLogger(SegmentDecoder.class); - - /** - * the maximum possible size a header message might have - */ - private static final int MAX_LENGHT = Segment.MAX_SEGMENT_SIZE - + EXTRA_HEADERS_LEN; - - private final SegmentStore store; - - public SegmentDecoder(SegmentStore store) { - super(MAX_LENGHT, 0, 4, 0, 0); - this.store = store; - } - - @Override - protected Object decode(ChannelHandlerContext ctx, ByteBuf in) - throws Exception { - ByteBuf frame = (ByteBuf) super.decode(ctx, in); - if (frame == null) { - return null; - } - int len = frame.readInt(); - byte type = frame.readByte(); - long msb = frame.readLong(); - long lsb = frame.readLong(); - long hash = frame.readLong(); - byte[] segment = new byte[len - 25]; - frame.getBytes(29, segment); - Hasher hasher = Hashing.murmur3_32().newHasher(); - long check = hasher.putBytes(segment).hash().padToLong(); - if (hash == check) { - SegmentId id = new SegmentId(store.getTracker(), msb, lsb); - Segment s = new Segment(store.getTracker(), id, - ByteBuffer.wrap(segment)); - log.debug("received type {} with id {} and size {}", type, id, - s.size()); - return s; - } - log.debug("received corrupted segment {}, ignoring", new UUID(msb, lsb)); - return null; - - } - - @Override - protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, - int index, int length) { - return buffer.slice(index, length); - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentEncoder.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentEncoder.java deleted file mode 100644 index a57ba4d..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentEncoder.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
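For reference, the framing used by the SegmentEncoder deleted below (and read back by SegmentDecoder and ReplyDecoder above) declares a length that excludes the 4-byte length prefix itself: a frame carrying an N-byte segment announces N + 25 bytes (1 type byte, 16 bytes of segment id, 8 bytes of checksum), which is why the decoders allocate new byte[len - 25], while EXTRA_HEADERS_LEN (29) also counts the prefix. A small sketch of that accounting, with illustrative names:

public class FrameAccounting {
    public static void main(String[] args) {
        int lengthPrefix = 4;  // written by writeInt(len) but excluded from len
        int typeByte = 1;      // e.g. Messages.HEADER_SEGMENT
        int segmentId = 8 + 8; // most and least significant bits of the segment id
        int checksum = 8;      // murmur3_32 hash padded to a long
        int extraHeadersLen = lengthPrefix + typeByte + segmentId + checksum;
        System.out.println(extraHeadersLen);                // 29, matches EXTRA_HEADERS_LEN
        System.out.println(extraHeadersLen - lengthPrefix); // 25, matches EXTRA_HEADERS_WO_SIZE and the len - 25 reads
    }
}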
- */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToByteEncoder; - -import java.io.ByteArrayOutputStream; - -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; - -import com.google.common.hash.Hasher; -import com.google.common.hash.Hashing; - -@Deprecated -public class SegmentEncoder extends MessageToByteEncoder { - - /** - * A segment message is composed of: - * - *
-     *  - (4 bytes) the message length
-     *  - (1 byte ) a message type (not currently used)
-     *  - (8 bytes) segment id most significant bits
-     *  - (8 bytes) segment id least significant bits
-     *  - (8 bytes) checksum hash
-     * 
- */ - static int EXTRA_HEADERS_LEN = 29; - - /** - * the header size not including the length - */ - private int EXTRA_HEADERS_WO_SIZE = EXTRA_HEADERS_LEN - 4; - - @Override - @Deprecated - protected void encode(ChannelHandlerContext ctx, Segment s, ByteBuf out) - throws Exception { - SegmentId id = s.getSegmentId(); - ByteArrayOutputStream baos = new ByteArrayOutputStream(s.size()); - s.writeTo(baos); - byte[] segment = baos.toByteArray(); - - Hasher hasher = Hashing.murmur3_32().newHasher(); - long hash = hasher.putBytes(segment).hash().padToLong(); - - int len = segment.length + EXTRA_HEADERS_WO_SIZE; - out.writeInt(len); - out.writeByte(Messages.HEADER_SEGMENT); - out.writeLong(id.getMostSignificantBits()); - out.writeLong(id.getLeastSignificantBits()); - out.writeLong(hash); - out.writeBytes(segment); - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentReply.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentReply.java deleted file mode 100644 index fe7d4ce..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/codec/SegmentReply.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
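The header arithmetic behind EXTRA_HEADERS_LEN can be checked directly against the deleted javadoc above: 4 (length) + 1 (type) + 8 + 8 (segment id halves) + 8 (hash) = 29, and the length field does not count itself. A hedged sketch, not part of this patch (class name and payload size are invented):

    public class FrameLayoutCheck {
        public static void main(String[] args) {
            int extraHeadersLen = 4 + 1 + 8 + 8 + 8;      // = 29, EXTRA_HEADERS_LEN
            int extraHeadersWoSize = extraHeadersLen - 4; // = 25, header minus the length field
            int segmentBytes = 1000;                      // hypothetical payload size
            // The encoder writes segmentBytes + 25 into the length field; the
            // deleted decoder mirrors this with "new byte[len - 25]".
            System.out.println(segmentBytes + extraHeadersWoSize); // 1025
        }
    }
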
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.codec; - -import org.apache.jackrabbit.oak.plugins.segment.Segment; - -@Deprecated -public class SegmentReply { - - @Deprecated - public static final int SEGMENT = 0; - @Deprecated - public static final int BLOB = 1; - - @Deprecated - public static SegmentReply empty() { - return new SegmentReply(); - } - - private final int type; - - private final Segment segment; - - private final IdArrayBasedBlob blob; - - @Deprecated - public SegmentReply(Segment segment) { - this.type = SEGMENT; - this.segment = segment; - this.blob = null; - } - - @Deprecated - public SegmentReply(IdArrayBasedBlob blob) { - this.type = BLOB; - this.segment = null; - this.blob = blob; - } - - private SegmentReply() { - this.type = -1; - this.segment = null; - this.blob = null; - } - - @Deprecated - public Segment getSegment() { - return this.segment; - } - - @Deprecated - public IdArrayBasedBlob getBlob() { - return blob; - } - - @Deprecated - public int getType() { - return type; - } - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ClientStandbyStatusMBean.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ClientStandbyStatusMBean.java deleted file mode 100644 index 315deba..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ClientStandbyStatusMBean.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.jmx; - -import org.apache.jackrabbit.oak.commons.jmx.Description; - -@Deprecated -public interface ClientStandbyStatusMBean extends StandbyStatusMBean { - - @Description("number of consecutive failed requests") - @Deprecated - int getFailedRequests(); - - @Description("number of seconds since last successful request") - @Deprecated - int getSecondsSinceLastSuccess(); - - @Description("Local timestamp of the moment when the last sync cycle was started") - @Deprecated - long getSyncStartTimestamp(); - - @Description("Local timestamp of the moment when the last sync cycle ended") - @Deprecated - long getSyncEndTimestamp(); - - // expose the informations as operations, too - - @Description("number of consecutive failed requests") - @Deprecated - int calcFailedRequests(); - - @Description("number of seconds since last successful request") - @Deprecated - int calcSecondsSinceLastSuccess(); - - @Description("Runs garbage collection") - @Deprecated - void cleanup(); - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ObservablePartnerMBean.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ObservablePartnerMBean.java deleted file mode 100644 index c94f1fc..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/ObservablePartnerMBean.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.jmx; - -import org.apache.jackrabbit.oak.commons.jmx.Description; - -import javax.annotation.CheckForNull; -import javax.annotation.Nonnull; - -@Deprecated -public interface ObservablePartnerMBean { - - @Nonnull - @Description("name of the partner") - @Deprecated - String getName(); - - @Description("IP of the remote") - @Deprecated - String getRemoteAddress(); - - @Description("Last request") - @Deprecated - String getLastRequest(); - - @Description("Port of the remote") - @Deprecated - int getRemotePort(); - - @CheckForNull - @Description("Time the remote instance was last contacted") - @Deprecated - String getLastSeenTimestamp(); - - @Description("Number of transferred segments") - @Deprecated - long getTransferredSegments(); - - @Description("Number of bytes stored in transferred segments") - @Deprecated - long getTransferredSegmentBytes(); - - @Description("Number of transferred binaries") - @Deprecated - long getTransferredBinaries(); - - @Description("Number of bytes stored in transferred binaries") - @Deprecated - long getTransferredBinariesBytes(); - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/StandbyStatusMBean.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/StandbyStatusMBean.java deleted file mode 100644 index fbd5ee1..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/jmx/StandbyStatusMBean.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.jackrabbit.oak.plugins.segment.standby.jmx; - -import org.apache.jackrabbit.oak.commons.jmx.Description; -import javax.annotation.Nonnull; - -@Deprecated -public interface StandbyStatusMBean { - @Deprecated - public static final String JMX_NAME = "org.apache.jackrabbit.oak:name=Status,type=\"Standby\""; - @Deprecated - public static final String STATUS_INITIALIZING = "initializing"; - @Deprecated - public static final String STATUS_STOPPED = "stopped"; - @Deprecated - public static final String STATUS_STARTING = "starting"; - @Deprecated - public static final String STATUS_RUNNING = "running"; - @Deprecated - public static final String STATUS_CLOSING = "closing"; - @Deprecated - public static final String STATUS_CLOSED = "closed"; - - @Nonnull - @Description("primary or standby") - @Deprecated - String getMode(); - - @Description("current status of the service") - @Deprecated - String getStatus(); - - @Description("instance is running") - @Deprecated - boolean isRunning(); - - @Description("stop the communication") - @Deprecated - void stop(); - - @Description("start the communication") - @Deprecated - void start(); -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServer.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServer.java deleted file mode 100644 index 24a36cb..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServer.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.server; - -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.handler.codec.LineBasedFrameDecoder; -import io.netty.handler.codec.compression.SnappyFramedEncoder; -import io.netty.handler.codec.string.StringDecoder; -import io.netty.handler.ssl.SslContext; -import io.netty.handler.ssl.util.SelfSignedCertificate; -import io.netty.util.CharsetUtil; -import io.netty.util.concurrent.Future; - -import java.io.Closeable; -import java.lang.management.ManagementFactory; -import java.security.cert.CertificateException; -import java.util.concurrent.TimeUnit; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.BlobEncoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.RecordIdEncoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.SegmentEncoder; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.StandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.CommunicationObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.InstanceNotFoundException; -import javax.management.MBeanServer; -import javax.management.ObjectName; -import javax.management.StandardMBean; -import javax.net.ssl.SSLException; - -@Deprecated -public class StandbyServer implements StandbyStatusMBean, Closeable { - - private static final Logger log = LoggerFactory - .getLogger(StandbyServer.class); - - private final int port; - - private final EventLoopGroup bossGroup; - private final EventLoopGroup workerGroup; - private final ServerBootstrap b; - private final CommunicationObserver observer; - private final StandbyServerHandler handler; - private SslContext sslContext; - private ChannelFuture channelFuture; - private boolean running; - - @Deprecated - public StandbyServer(int port, final SegmentStore store) - throws CertificateException, SSLException { - this(port, store, null, false); - } - - @Deprecated - public StandbyServer(int port, final SegmentStore store, boolean secure) - throws CertificateException, SSLException { - this(port, store, null, secure); - } - - @Deprecated - public StandbyServer(int port, final SegmentStore store, String[] allowedClientIPRanges) - throws CertificateException, SSLException { - this(port, store, allowedClientIPRanges, false); - } - - @Deprecated - public StandbyServer(int port, final SegmentStore store, String[] allowedClientIPRanges, boolean secure) - throws CertificateException, SSLException { - this.port = port; - - if (secure) { - SelfSignedCertificate ssc = new SelfSignedCertificate(); - sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey()); - } - - observer = new CommunicationObserver("primary"); - handler = new StandbyServerHandler(store, observer, allowedClientIPRanges); - bossGroup = new NioEventLoopGroup(1); - workerGroup = new NioEventLoopGroup(); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.registerMBean(new StandardMBean(this, 
StandbyStatusMBean.class), new ObjectName(this.getMBeanName())); - } - catch (Exception e) { - log.error("can't register standby status mbean", e); - } - - b = new ServerBootstrap(); - b.group(bossGroup, workerGroup); - b.channel(NioServerSocketChannel.class); - - b.option(ChannelOption.TCP_NODELAY, true); - b.option(ChannelOption.SO_REUSEADDR, true); - b.childOption(ChannelOption.TCP_NODELAY, true); - b.childOption(ChannelOption.SO_REUSEADDR, true); - b.childOption(ChannelOption.SO_KEEPALIVE, true); - - b.childHandler(new ChannelInitializer() { - @Override - public void initChannel(SocketChannel ch) throws Exception { - ChannelPipeline p = ch.pipeline(); - if (sslContext != null) { - p.addLast(sslContext.newHandler(ch.alloc())); - } - p.addLast(new LineBasedFrameDecoder(8192)); - p.addLast(new StringDecoder(CharsetUtil.UTF_8)); - p.addLast(new SnappyFramedEncoder()); - p.addLast(new RecordIdEncoder()); - p.addLast(new SegmentEncoder()); - p.addLast(new BlobEncoder()); - p.addLast(handler); - } - }); - } - - @Deprecated - public String getMBeanName() { - return StandbyStatusMBean.JMX_NAME + ",id=" + this.port; - } - - @Deprecated - public void close() { - stop(); - handler.state = STATUS_CLOSING; - observer.unregister(); - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.unregisterMBean(new ObjectName(this.getMBeanName())); - } catch (InstanceNotFoundException e) { - // ignore - } catch (Exception e) { - log.error("can't unregister standby status mbean", e); - } - if (bossGroup != null && !bossGroup.isShuttingDown()) { - bossGroup.shutdownGracefully(0, 1, TimeUnit.SECONDS).syncUninterruptibly(); - } - if (workerGroup != null && !workerGroup.isShuttingDown()) { - workerGroup.shutdownGracefully(0, 1, TimeUnit.SECONDS).syncUninterruptibly(); - } - handler.state = STATUS_CLOSED; - } - - private void start(boolean wait) { - if (running) return; - - this.handler.state = STATUS_STARTING; - - final Thread close = new Thread() { - @Override - public void run() { - try { - running = true; - handler.state = STATUS_RUNNING; - channelFuture.sync().channel().closeFuture().sync(); - } catch (InterruptedException e) { - StandbyServer.this.stop(); - } - } - }; - final ChannelFutureListener bindListener = new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) { - if (future.isSuccess()) { - close.start(); - } else { - log.error("Server failed to start on port " + port - + ", will be canceled", future.cause()); - future.channel().close(); - new Thread() { - @Override - public void run() { - close(); - } - }.start(); - } - } - }; - Future startup = bossGroup.submit(new Runnable() { - @Override - public void run() { - //netty 4.0.20 has a race condition issue with - //asynchronous channel registration. As a workaround - //we bind asynchronously from the boss event group to make - //the channel registration synchronous. - //Note that now this method will return immediately. 
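The registration-race comment above is the one non-obvious piece of reasoning in this class. A hedged, self-contained sketch of the same bind-from-the-boss-group pattern, not part of this patch (class name and port are invented):

    import io.netty.bootstrap.ServerBootstrap;
    import io.netty.channel.ChannelFuture;
    import io.netty.channel.ChannelFutureListener;
    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.EventLoopGroup;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.channel.socket.nio.NioServerSocketChannel;

    public class BindFromBossSketch {
        public static void main(String[] args) {
            final EventLoopGroup boss = new NioEventLoopGroup(1);
            final EventLoopGroup worker = new NioEventLoopGroup();
            final ServerBootstrap b = new ServerBootstrap()
                    .group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // no handlers needed for this sketch
                        }
                    });
            // Submitting the bind to the boss event loop serializes it with
            // channel registration, sidestepping the race described above;
            // the submitting thread returns immediately.
            boss.submit(new Runnable() {
                @Override
                public void run() {
                    ChannelFuture f = b.bind(8023); // hypothetical port
                    f.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture future) {
                            System.out.println("bound: " + future.isSuccess());
                            boss.shutdownGracefully();
                            worker.shutdownGracefully();
                        }
                    });
                }
            });
        }
    }
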
- channelFuture = b.bind(port); - channelFuture.addListener(bindListener); - } - }); - if (!startup.awaitUninterruptibly(10000)) { - log.error("Server failed to start within 10 seconds and will be canceled"); - startup.cancel(true); - } else if (wait) { - try { - close.join(); - } catch (InterruptedException ignored) {} - } - } - - @Deprecated - public void startAndWait() { - start(true); - } - - @Override - @Deprecated - public void start() { - start(false); - } - - @Override - @Deprecated - public String getMode() { - return "primary"; - } - - @Override - @Deprecated - public boolean isRunning() { return running; } - - @Override - @Deprecated - public void stop() { - if (running) { - running = false; - this.handler.state = STATUS_STOPPED; - channelFuture.channel().disconnect(); - } - } - - @Override - @Deprecated - public String getStatus() { - return handler == null ? STATUS_INITIALIZING : handler.state; - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServerHandler.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServerHandler.java deleted file mode 100644 index edd46b7..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/server/StandbyServerHandler.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.server; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelHandler.Sharable; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.SimpleChannelInboundHandler; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.IllegalRepositoryStateException; -import org.apache.jackrabbit.oak.plugins.segment.RecordId; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.codec.Messages; -import org.apache.jackrabbit.oak.plugins.segment.standby.store.CommunicationObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Sharable -@Deprecated -public class StandbyServerHandler extends SimpleChannelInboundHandler { - - private static final Logger log = LoggerFactory - .getLogger(StandbyServerHandler.class); - - private final SegmentStore store; - private final CommunicationObserver observer; - private final String[] allowedIPRanges; - @Deprecated - public String state; - - @Deprecated - public StandbyServerHandler(SegmentStore store, CommunicationObserver observer, String[] allowedIPRanges) { - this.store = store; - this.observer = observer; - this.allowedIPRanges = allowedIPRanges; - } - - private RecordId headId() { - if (store != null) { - return store.getHead().getRecordId(); - } - return null; - } - - private static long ipToLong(InetAddress ip) { - byte[] octets = ip.getAddress(); - long result = 0; - for (byte octet : octets) { - result <<= 8; - result |= octet & 0xff; - } - return result; - } - - private boolean clientAllowed(InetSocketAddress client) { - if (this.allowedIPRanges != null && this.allowedIPRanges.length > 0) { - for (String s : this.allowedIPRanges) { - try { - if (ipToLong(InetAddress.getByName(s)) == ipToLong(client.getAddress())) { - return true; - } - } - catch (UnknownHostException ignored) { /* it's an ip range */ } - int i = s.indexOf('-'); - if (i > 0) { - try { - long startIPRange = ipToLong(InetAddress.getByName(s.substring(0, i).trim())); - long endIPRange = ipToLong(InetAddress.getByName(s.substring(i + 1).trim())); - long ipl = ipToLong(client.getAddress()); - if (startIPRange <= ipl && ipl <= endIPRange) return true; - } - catch (Exception e) { - log.warn("invalid IP-range format: " + s); - } - } - } - return false; - } - return true; - } - - @Override - @Deprecated - public void channelRegistered(io.netty.channel.ChannelHandlerContext ctx) throws java.lang.Exception { - state = "channel registered"; - super.channelRegistered(ctx); - } - - @Override - @Deprecated - public void channelActive(io.netty.channel.ChannelHandlerContext ctx) throws java.lang.Exception { - state = "channel active"; - super.channelActive(ctx); - } - - @Override - @Deprecated - public void channelInactive(io.netty.channel.ChannelHandlerContext ctx) throws java.lang.Exception { - state = "channel inactive"; - super.channelInactive(ctx); - } - - @Override - @Deprecated - public void channelUnregistered(io.netty.channel.ChannelHandlerContext ctx) throws java.lang.Exception { - state = "channel unregistered"; - super.channelUnregistered(ctx); - } - - @Override - @Deprecated - public void 
channelRead0(ChannelHandlerContext ctx, String payload) - throws Exception { - state = "got message"; - - String request = Messages.extractMessageFrom(payload); - InetSocketAddress client = (InetSocketAddress)ctx.channel().remoteAddress(); - - if (!clientAllowed(client)) { - log.warn("Got request from client " + client + " which is not in the allowed ip ranges! Request will be ignored."); - } - else { - String clientID = Messages.extractClientFrom(payload); - observer.gotMessageFrom(clientID, request, client); - if (Messages.GET_HEAD.equalsIgnoreCase(request)) { - RecordId r = headId(); - if (r != null) { - ctx.writeAndFlush(r); - return; - } - } else if (request.startsWith(Messages.GET_SEGMENT)) { - String sid = request.substring(Messages.GET_SEGMENT.length()); - log.debug("request segment id {}", sid); - UUID uuid = UUID.fromString(sid); - - Segment s = null; - - for (int i = 0; i < 10; i++) { - try { - s = store.readSegment(new SegmentId(store.getTracker(), - uuid.getMostSignificantBits(), uuid - .getLeastSignificantBits())); - } catch (IllegalRepositoryStateException e) { - // segment not found - log.debug("waiting for segment. Got exception: " + e.getMessage()); - TimeUnit.MILLISECONDS.sleep(2000); - } - if (s != null) break; - } - - if (s != null) { - log.debug("sending segment " + sid + " to " + client); - ctx.writeAndFlush(s); - observer.didSendSegmentBytes(clientID, s.size()); - return; - } - } else if (request.startsWith(Messages.GET_BLOB)) { - String bid = request.substring(Messages.GET_BLOB.length()); - log.debug("request blob id {}", bid); - Blob b = store.readBlob(bid); - log.debug("sending blob " + bid + " to " + client); - ctx.writeAndFlush(b); - observer.didSendBinariesBytes(clientID, - Math.max(0, (int) b.length())); - return; - } else { - log.warn("Unknown request {}, ignoring.", request); - } - } - ctx.writeAndFlush(Unpooled.EMPTY_BUFFER); - } - - @Override - @Deprecated - public void channelReadComplete(ChannelHandlerContext ctx) { - ctx.flush(); - } - - @Override - @Deprecated - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - state = "exception occurred: " + cause.getMessage(); - boolean isReadTimeout = cause.getMessage() != null - && cause.getMessage().contains("Connection reset by peer"); - if (isReadTimeout) { - log.warn("Exception occurred: " + cause.getMessage(), cause); - } else { - log.error("Exception occurred: " + cause.getMessage(), cause); - } - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/CommunicationObserver.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/CommunicationObserver.java deleted file mode 100644 index 0d0e2e6..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/CommunicationObserver.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
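The allowed-client check in the handler deleted above accepts either single addresses or "start-end" ranges and compares addresses numerically after packing the octets into a long. A hedged, runnable sketch of that comparison, not part of this patch (class name and addresses are invented):

    import java.net.InetAddress;

    public class IpRangeSketch {
        // Same packing as the deleted ipToLong: big-endian octets into a long.
        static long ipToLong(InetAddress ip) {
            long result = 0;
            for (byte octet : ip.getAddress()) {
                result = (result << 8) | (octet & 0xff);
            }
            return result;
        }

        public static void main(String[] args) throws Exception {
            long start = ipToLong(InetAddress.getByName("10.0.0.1"));
            long end = ipToLong(InetAddress.getByName("10.0.0.255"));
            long client = ipToLong(InetAddress.getByName("10.0.0.42"));
            System.out.println(start <= client && client <= end); // true
        }
    }
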
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby.store; - - -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.StandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.ObservablePartnerMBean; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.MBeanServer; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; -import javax.management.StandardMBean; - -import java.lang.management.ManagementFactory; -import java.net.InetSocketAddress; -import java.util.Date; -import java.util.HashMap; -import java.util.Map; - -@Deprecated -public class CommunicationObserver { - private static final int MAX_CLIENT_STATISTICS = 10; - - private class CommunicationPartnerMBean implements ObservablePartnerMBean { - private final ObjectName mbeanName; - private final String clientName; - public String lastRequest; - public Date lastSeen; - public String remoteAddress; - public int remotePort; - public long segmentsSent; - public long segmentBytesSent; - public long binariesSent; - public long binariesBytesSent; - - public CommunicationPartnerMBean(String clientName) throws MalformedObjectNameException { - this.clientName = clientName; - this.mbeanName = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=\"Client " + clientName + "\""); - } - - public ObjectName getMBeanName() { - return this.mbeanName; - } - - @Override - public String getName() { - return this.clientName; - } - - @Override - public String getRemoteAddress() { - return this.remoteAddress; - } - - @Override - public String getLastRequest() { - return this.lastRequest; - } - - @Override - public int getRemotePort() { - return this.remotePort; - } - - @Override - public String getLastSeenTimestamp() { - return this.lastSeen == null ? 
null : this.lastSeen.toString(); - } - - @Override - public long getTransferredSegments() { - return this.segmentsSent; - } - - @Override - public long getTransferredSegmentBytes() { - return this.segmentBytesSent; - } - - @Override - public long getTransferredBinaries() { - return this.binariesSent; - } - - @Override - public long getTransferredBinariesBytes() { - return this.binariesBytesSent; - } - } - - private static final Logger log = LoggerFactory - .getLogger(CommunicationObserver.class); - - private final String identifier; - private final Map partnerDetails; - - @Deprecated - public CommunicationObserver(String myID) { - this.identifier = myID; - this.partnerDetails = new HashMap(); - } - - private void unregister(CommunicationPartnerMBean m) { - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.unregisterMBean(m.getMBeanName()); - } - catch (Exception e) { - log.error("error unregistering mbean for client '" + m.getName() + "'", e); - } - } - - @Deprecated - public void unregister() { - for (CommunicationPartnerMBean m : this.partnerDetails.values()) { - unregister(m); - } - } - - @Deprecated - public void gotMessageFrom(String client, String request, InetSocketAddress remote) throws MalformedObjectNameException { - log.debug("got message '" + request + "' from client " + client); - CommunicationPartnerMBean m = this.partnerDetails.get(client); - boolean register = false; - if (m == null) { - cleanUp(); - m = new CommunicationPartnerMBean(client); - m.remoteAddress = remote.getAddress().getHostAddress(); - m.remotePort = remote.getPort(); - register = true; - } - m.lastSeen = new Date(); - m.lastRequest = request; - this.partnerDetails.put(client, m); - if (register) { - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - try { - jmxServer.registerMBean(new StandardMBean(m, ObservablePartnerMBean.class), m.getMBeanName()); - } - catch (Exception e) { - log.error("can register mbean for client '" + m.getName() + "'", e); - } - } - } - - @Deprecated - public void didSendSegmentBytes(String client, int size) { - log.debug("did send segment with " + size + " bytes to client " + client); - CommunicationPartnerMBean m = this.partnerDetails.get(client); - m.segmentsSent++; - m.segmentBytesSent += size; - this.partnerDetails.put(client, m); - } - - @Deprecated - public void didSendBinariesBytes(String client, int size) { - log.debug("did send binary with " + size + " bytes to client " + client); - CommunicationPartnerMBean m = this.partnerDetails.get(client); - m.binariesSent++; - m.binariesBytesSent += size; - this.partnerDetails.put(client, m); - } - - @Deprecated - public String getID() { - return this.identifier; - } - - // helper - - private void cleanUp() { - while (this.partnerDetails.size() >= MAX_CLIENT_STATISTICS) { - CommunicationPartnerMBean oldestEntry = oldestEntry(); - if (oldestEntry == null) return; - log.info("housekeeping: removing statistics for " + oldestEntry.getName()); - unregister(oldestEntry); - this.partnerDetails.remove(oldestEntry.getName()); - } - } - - private CommunicationPartnerMBean oldestEntry() { - CommunicationPartnerMBean ret = null; - for (CommunicationPartnerMBean m : this.partnerDetails.values()) { - if (ret == null || ret.lastSeen.after(m.lastSeen)) ret = m; - } - return ret; - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/RemoteSegmentLoader.java 
oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/RemoteSegmentLoader.java deleted file mode 100644 index 836304c..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/RemoteSegmentLoader.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby.store; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.Segment; - -@Deprecated -public interface RemoteSegmentLoader { - - @Deprecated - Segment readSegment(String id); - - @Deprecated - Blob readBlob(String blobId); - - @Deprecated - void close(); - - @Deprecated - boolean isClosed(); - - @Deprecated - boolean isRunning(); - -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStore.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStore.java deleted file mode 100644 index 68393db..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStore.java +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby.store; - -import static com.google.common.collect.Sets.newHashSet; -import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.plugins.segment.Segment; -import org.apache.jackrabbit.oak.plugins.segment.SegmentId; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTracker; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Deprecated -public class StandbyStore implements SegmentStore { - - private static final Logger log = LoggerFactory.getLogger(StandbyStore.class); - - private final SegmentTracker tracker = new SegmentTracker(this); - - private final SegmentStore delegate; - - private RemoteSegmentLoader loader; - - @Deprecated - public StandbyStore(SegmentStore delegate) { - this.delegate = delegate; - } - - @Override - @Deprecated - public SegmentTracker getTracker() { - return tracker; - } - - @Override - @Deprecated - public SegmentNodeState getHead() { - return delegate.getHead(); - } - - @Override - @Deprecated - public boolean setHead(SegmentNodeState base, SegmentNodeState head) { - return delegate.setHead(base, head); - } - - @Override - @Deprecated - public boolean containsSegment(SegmentId id) { - return delegate.containsSegment(id); - } - - @Override - @Deprecated - public Segment readSegment(SegmentId sid) { - callId++; - Deque ids = new ArrayDeque(); - ids.offer(sid); - int err = 0; - Set persisted = new HashSet(); - - Map cache = new HashMap(); - long cacheOps = 0; - - long cacheWeight = 0; - long maxWeight = 0; - long maxKeys = 0; - - Set visited = newHashSet(); - - while (!ids.isEmpty()) { - SegmentId id = ids.remove(); - - visited.add(id); - - if (!persisted.contains(id) && !delegate.containsSegment(id)) { - Segment s; - boolean logRefs = true; - if (cache.containsKey(id)) { - s = cache.remove(id); - cacheWeight -= s.size(); - cacheOps++; - logRefs = false; - } else { - log.debug("transferring segment {}", id); - s = loader.readSegment(id.toString()); - } - - if (s != null) { - log.debug("processing segment {} with size {}", id, - s.size()); - if (id.isDataSegmentId()) { - boolean hasPendingRefs = false; - List refs = s.getReferencedIds(); - if (logRefs) { - log.debug("{} -> {}", id, refs); - } - for (SegmentId nr : refs) { - // skip already persisted or self-ref - if (persisted.contains(nr) || id.equals(nr) || visited.contains(nr)) { - continue; - } - hasPendingRefs = true; - if (!ids.contains(nr)) { - if (nr.isBulkSegmentId()) { - // binaries first - ids.addFirst(nr); - } else { - // data segments last - ids.add(nr); - } - } - } - - if (!hasPendingRefs) { - persisted.add(id); - persist(id, s); - } else { - // persist it later, after the refs are in place - ids.add(id); - - // TODO there is a chance this might introduce - // a OOME because of the position of the current - // segment in the processing queue. 
putting it at - // the end of the current queue means it will stay - // in the cache until the pending queue of the - // segment's references is processed. - cache.put(id, s); - cacheWeight += s.size(); - cacheOps++; - - maxWeight = Math.max(maxWeight, cacheWeight); - maxKeys = Math.max(maxKeys, cache.size()); - } - } else { - persisted.add(id); - persist(id, s); - } - ids.removeAll(persisted); - err = 0; - } else { - log.error("could NOT read segment {}", id); - if (loader.isClosed() || err == 4) { - loader.close(); - throw new IllegalStateException( - "Unable to load remote segment " + id); - } - err++; - ids.addFirst(id); - } - } else { - persisted.add(id); - } - } - cacheStats.put(callId, "W: " + humanReadableByteCount(maxWeight) - + ", Keys: " + maxKeys + ", Ops: " + cacheOps); - return delegate.readSegment(sid); - } - - @Deprecated - public void persist(SegmentId in, Segment s) { - SegmentId id = delegate.getTracker().getSegmentId( - in.getMostSignificantBits(), in.getLeastSignificantBits()); - log.debug("persisting segment {} with size {}", id, s.size()); - try { - ByteArrayOutputStream bout = new ByteArrayOutputStream(s.size()); - s.writeTo(bout); - writeSegment(id, bout.toByteArray(), 0, s.size()); - } catch (IOException e) { - throw new IllegalStateException("Unable to write remote segment " - + id, e); - } - } - - private long callId = 0; - private Map cacheStats; - - @Deprecated - public void preSync(RemoteSegmentLoader loader) { - this.loader = loader; - this.cacheStats = new HashMap(); - } - - @Deprecated - public void postSync() { - loader = null; - if (log.isDebugEnabled() && !cacheStats.isEmpty()) { - log.debug("sync cache stats {}", cacheStats); - } - cacheStats = null; - } - - @Override - @Deprecated - public void writeSegment(SegmentId id, byte[] bytes, int offset, int length) throws IOException { - delegate.writeSegment(id, bytes, offset, length); - } - - @Override - @Deprecated - public void close() { - delegate.close(); - } - - @Override - @Deprecated - public Blob readBlob(String reference) { - return delegate.readBlob(reference); - } - - @Override - @Deprecated - public BlobStore getBlobStore() { - return delegate.getBlobStore(); - } - - @Override - @Deprecated - public void gc() { - delegate.gc(); - } - - @Deprecated - public long size() { - if (delegate instanceof FileStore) { - return ((FileStore) delegate).size(); - } - return -1; - } - - @Deprecated - public void cleanup() { - if (delegate instanceof FileStore) { - try { - delegate.getTracker().getWriter().dropCache(); - ((FileStore) delegate).flush(true); - } catch (IOException e) { - log.error("Error running cleanup", e); - } - } else { - log.warn("Delegate is not a FileStore, ignoring cleanup call"); - } - } -} diff --git oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStoreService.java oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStoreService.java deleted file mode 100644 index c7b4dbe..0000000 --- oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/store/StandbyStoreService.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
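The queueing policy that the cache comment above belongs to is easy to lose in the deleted readSegment: bulk (binary) segments are prepended to the work queue, data segments are appended, and a data segment with unpersisted references is re-queued behind them until its references have been persisted. A minimal hedged sketch of that ordering, not part of this patch (identifiers are invented):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class SyncOrderSketch {
        public static void main(String[] args) {
            Deque<String> ids = new ArrayDeque<String>();
            ids.add("data-segment");      // data segments go to the back of the queue
            ids.addFirst("bulk-segment"); // bulk (binary) segments jump to the front
            System.out.println(ids);      // [bulk-segment, data-segment]
        }
    }
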
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby.store; - -import static java.lang.String.valueOf; -import static org.apache.felix.scr.annotations.ReferencePolicy.STATIC; -import static org.apache.felix.scr.annotations.ReferencePolicyOption.GREEDY; - -import java.io.IOException; -import java.security.cert.CertificateException; -import java.util.Dictionary; -import java.util.Hashtable; - -import javax.net.ssl.SSLException; - -import org.apache.felix.scr.annotations.Activate; -import org.apache.felix.scr.annotations.Component; -import org.apache.felix.scr.annotations.ConfigurationPolicy; -import org.apache.felix.scr.annotations.Deactivate; -import org.apache.felix.scr.annotations.Property; -import org.apache.felix.scr.annotations.PropertyOption; -import org.apache.felix.scr.annotations.Reference; -import org.apache.jackrabbit.oak.commons.PropertiesUtil; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStoreProvider; -import org.apache.jackrabbit.oak.plugins.segment.SegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.osgi.framework.ServiceRegistration; -import org.osgi.service.component.ComponentContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Property(name = "org.apache.sling.installer.configuration.persist", label="Persist configuration", description = "Must be always disabled to avoid storing the configuration in the repository", boolValue = false) -@Component(metatype = true, policy = ConfigurationPolicy.REQUIRE) -@Deprecated -public class StandbyStoreService { - - private final Logger log = LoggerFactory.getLogger(getClass()); - - private static final String MODE_PRIMARY = "primary"; - private static final String MODE_STANDBY = "standby"; - - @Deprecated - public static final String MODE_DEFAULT = MODE_PRIMARY; - @Property(options = { - @PropertyOption(name = MODE_PRIMARY, value = MODE_PRIMARY), - @PropertyOption(name = MODE_STANDBY, value = MODE_STANDBY) }, - value = MODE_DEFAULT) - @Deprecated - public static final String MODE = "mode"; - - @Deprecated - public static final int PORT_DEFAULT = 8023; - @Property(intValue = PORT_DEFAULT) - @Deprecated - public static final String PORT = "port"; - - @Deprecated - public static final String PRIMARY_HOST_DEFAULT = "127.0.0.1"; - @Property(value = PRIMARY_HOST_DEFAULT) - @Deprecated - public static final String PRIMARY_HOST = "primary.host"; - - @Deprecated - public static final int INTERVAL_DEFAULT = 5; - @Property(intValue = INTERVAL_DEFAULT) - @Deprecated - public static final String INTERVAL = "interval"; - - @Deprecated - public static final String[] ALLOWED_CLIENT_IP_RANGES_DEFAULT = new String[] {}; - @Property(cardinality = Integer.MAX_VALUE) - @Deprecated - public static final String ALLOWED_CLIENT_IP_RANGES = "primary.allowed-client-ip-ranges"; - - @Deprecated - public static 
final boolean SECURE_DEFAULT = false; - @Property(boolValue = SECURE_DEFAULT) - @Deprecated - public static final String SECURE = "secure"; - - @Deprecated - public static final int READ_TIMEOUT_DEFAULT = 60000; - @Property(intValue = READ_TIMEOUT_DEFAULT) - @Deprecated - public static final String READ_TIMEOUT = "standby.readtimeout"; - - @Deprecated - public static final boolean AUTO_CLEAN_DEFAULT = false; - @Property(boolValue = AUTO_CLEAN_DEFAULT) - @Deprecated - public static final String AUTO_CLEAN = "standby.autoclean"; - - @Reference(policy = STATIC, policyOption = GREEDY) - private SegmentStoreProvider storeProvider = null; - - private SegmentStore segmentStore; - - private StandbyServer primary = null; - private StandbyClient sync = null; - - private ServiceRegistration syncReg = null; - - @Activate - private void activate(ComponentContext context) throws IOException, CertificateException { - if (storeProvider != null) { - segmentStore = storeProvider.getSegmentStore(); - } else { - throw new IllegalArgumentException( - "Missing SegmentStoreProvider service"); - } - String mode = valueOf(context.getProperties().get(MODE)); - if (MODE_PRIMARY.equals(mode)) { - bootstrapMaster(context); - } else if (MODE_STANDBY.equals(mode)) { - bootstrapSlave(context); - } else { - throw new IllegalArgumentException( - "Unexpected 'mode' param, expecting 'primary' or 'standby' got " - + mode); - } - } - - @Deactivate - @Deprecated - public synchronized void deactivate() { - if (primary != null) { - primary.close(); - } - if (sync != null) { - sync.close(); - } - if (syncReg != null) { - syncReg.unregister(); - } - } - - private void bootstrapMaster(ComponentContext context) throws CertificateException, SSLException { - Dictionary props = context.getProperties(); - int port = PropertiesUtil.toInteger(props.get(PORT), PORT_DEFAULT); - String[] ranges = PropertiesUtil.toStringArray(props.get(ALLOWED_CLIENT_IP_RANGES), ALLOWED_CLIENT_IP_RANGES_DEFAULT); - boolean secure = PropertiesUtil.toBoolean(props.get(SECURE), SECURE_DEFAULT); - primary = new StandbyServer(port, segmentStore, ranges, secure); - primary.start(); - log.info("started primary on port {} with allowed ip ranges {}.", port, ranges); - } - - private void bootstrapSlave(ComponentContext context) throws SSLException { - Dictionary props = context.getProperties(); - int port = PropertiesUtil.toInteger(props.get(PORT), PORT_DEFAULT); - long interval = PropertiesUtil.toInteger(props.get(INTERVAL), INTERVAL_DEFAULT); - String host = PropertiesUtil.toString(props.get(PRIMARY_HOST), PRIMARY_HOST_DEFAULT); - boolean secure = PropertiesUtil.toBoolean(props.get(SECURE), SECURE_DEFAULT); - int readTimeout = PropertiesUtil.toInteger(props.get(READ_TIMEOUT), READ_TIMEOUT_DEFAULT); - boolean clean = PropertiesUtil.toBoolean(props.get(AUTO_CLEAN), AUTO_CLEAN_DEFAULT); - - sync = new StandbyClient(host, port, segmentStore, secure, readTimeout, clean); - Dictionary dictionary = new Hashtable(); - dictionary.put("scheduler.period", interval); - dictionary.put("scheduler.concurrent", false); - // dictionary.put("scheduler.runOn", "SINGLE"); - - syncReg = context.getBundleContext().registerService( - Runnable.class.getName(), sync, dictionary); - log.info("started standby sync with {}:{} at {} sec.", host, - port, interval); - } -} diff --git oak-tarmk-standby/src/main/resources/OSGI-INF/metatype/metatype.properties oak-tarmk-standby/src/main/resources/OSGI-INF/metatype/metatype.properties deleted file mode 100644 index 14a4ff5..0000000 --- 
oak-tarmk-standby/src/main/resources/OSGI-INF/metatype/metatype.properties +++ /dev/null @@ -1,50 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# suppress inspection "UnusedProperty" for whole file - -org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.name = Apache Jackrabbit Oak TarMK Cold Standby service -org.apache.jackrabbit.oak.plugins.segment.standby.store.StandbyStoreService.description = Provides continuous backups of TarMK based repositories - -mode.name = Mode -mode.description = TarMK Cold Standby mode (primary or standby) - -port.name = Port -port.description = TCP/IP port to use - -primary.host.name = Primary Host -primary.host.description = Primary host (standby mode only) - -interval.name = Sync interval (seconds) -interval.description = Sync interval in seconds (standby mode only) - -primary.allowed-client-ip-ranges.name = Allowed IP-Ranges -primary.allowed-client-ip-ranges.description = Accept incoming requests for these host names and IP-ranges only (primary mode only) - -secure.name = Secure -secure.description = Use secure connections - -org.apache.sling.installer.configuration.persist.name = Persist configuration -org.apache.sling.installer.configuration.persist.description = Must be always disabled to avoid storing the configuration in the repository - -standby.readtimeout.name = Standby Read Timeout -standby.readtimeout.description = Timeout for requests issued from the standby instance in milliseconds - -standby.autoclean.name = Standby Automatic Cleanup -standby.autoclean.description = Call the cleanup method if the size of the store increases over 25% on a sync cycle diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/DebugSegmentStore.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/DebugSegmentStore.java deleted file mode 100644 index 026b43f..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/DebugSegmentStore.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment; - -import java.io.IOException; - -import javax.annotation.Nonnull; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; - -public class DebugSegmentStore implements SegmentStore { - - private final SegmentStore target; - public boolean createReadErrors; - - public DebugSegmentStore(SegmentStore targetStore) { - this.target = targetStore; - } - - @Override - public SegmentTracker getTracker() { - return this.target.getTracker(); - } - - @Nonnull - @Override - public SegmentNodeState getHead() { - return this.target.getHead(); - } - - @Override - public boolean setHead(SegmentNodeState base, SegmentNodeState head) { - return this.target.setHead(base, head); - } - - @Override - public boolean containsSegment(SegmentId id) { - return this.target.containsSegment(id); - } - - @Override - public Segment readSegment(SegmentId segmentId) { - return createReadErrors ? null : this.target.readSegment(segmentId); - } - - @Override - public void writeSegment(SegmentId id, byte[] bytes, int offset, int length) throws IOException { - this.target.writeSegment(id, bytes, offset, length); - } - - @Override - public void close() { - this.target.close(); - } - - @Override - public Blob readBlob(String reference) { - return this.target.readBlob(reference); - } - - @Override - public BlobStore getBlobStore() { - return this.target.getBlobStore(); - } - - @Override - public void gc() { - this.target.gc(); - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/NetworkErrorProxy.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/NetworkErrorProxy.java deleted file mode 100644 index 2526090..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/NetworkErrorProxy.java +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import io.netty.bootstrap.Bootstrap; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.buffer.ByteBuf; -import io.netty.channel.*; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.TimeUnit; - -public class NetworkErrorProxy { - static final Logger log = LoggerFactory - .getLogger(NetworkErrorProxy.class); - - private final int inboundPort; - private final int outboundPort; - private final String host; - private ChannelFuture f; - - private ForwardHandler fh; - - EventLoopGroup bossGroup = new NioEventLoopGroup(); - EventLoopGroup workerGroup = new NioEventLoopGroup(); - - public NetworkErrorProxy(int inboundPort, String outboundHost, int outboundPort) { - this.inboundPort = inboundPort; - this.outboundPort = outboundPort; - this.host = outboundHost; - this.fh = new ForwardHandler(NetworkErrorProxy.this.host, NetworkErrorProxy.this.outboundPort); - } - - public void skipBytes(int pos, int n) { - this.fh.skipPosition = pos; - this.fh.skipBytes = n; - } - - public void flipByte(int pos) { - this.fh.flipPosition = pos; - } - - public void run() throws Exception { - try { - ServerBootstrap b = new ServerBootstrap(); - b.group(bossGroup, workerGroup) - .channel(NioServerSocketChannel.class) - .childHandler(new ChannelInitializer() { - @Override - public void initChannel(SocketChannel ch) throws Exception { - ch.pipeline().addLast(NetworkErrorProxy.this.fh); - } - }); - - f = b.bind(this.inboundPort).sync(); - } catch (Exception e) { - log.warn( - "Unable to start proxy on port " + inboundPort + ": " - + e.getMessage(), e); - } - } - - public void reset() throws Exception { - if (f == null) { - throw new Exception("proxy not started"); - } - f.channel().disconnect(); - this.fh = new ForwardHandler(NetworkErrorProxy.this.host, NetworkErrorProxy.this.outboundPort); - run(); - } - - public void close() { - if (f != null) { - f.channel().close().syncUninterruptibly(); - } - if (bossGroup != null && !bossGroup.isShuttingDown()) { - bossGroup.shutdownGracefully(0, 150, TimeUnit.MILLISECONDS).syncUninterruptibly(); - } - if (workerGroup != null && !workerGroup.isShuttingDown()) { - workerGroup.shutdownGracefully(0, 150, TimeUnit.MILLISECONDS).syncUninterruptibly(); - } - } -} - -class ForwardHandler extends ChannelInboundHandlerAdapter { - private final String targetHost; - private final int targetPort; - public long transferredBytes; - public int skipPosition; - public int skipBytes; - public int flipPosition; - private ChannelFuture remote; - - public ForwardHandler(String host, int port) { - this.targetHost = host; - this.targetPort = port; - this.flipPosition = -1; - } - - @Override - public void channelRegistered(ChannelHandlerContext ctx) throws Exception { - final ChannelHandlerContext c = ctx; - EventLoopGroup group = new NioEventLoopGroup(); - Bootstrap cb = new Bootstrap(); - cb.group(group); - cb.channel(NioSocketChannel.class); - - cb.handler(new ChannelInitializer() { - @Override - public void initChannel(SocketChannel ch) throws Exception { - SendBackHandler sbh = new SendBackHandler(c); - if (ForwardHandler.this.flipPosition >= 0) { - sbh = new BitFlipHandler(c, ForwardHandler.this.flipPosition); - } - else if (ForwardHandler.this.skipBytes > 0) { - sbh = new 
SwallowingHandler(c, ForwardHandler.this.skipPosition, ForwardHandler.this.skipBytes); - } - ch.pipeline().addFirst(sbh); - } - }); - remote = cb.connect(this.targetHost, this.targetPort).sync(); - - ctx.fireChannelRegistered(); - } - - @Override - public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { - remote.channel().close(); - remote = null; - ctx.fireChannelUnregistered(); - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) { - if (msg instanceof ByteBuf) { - ByteBuf bb = (ByteBuf)msg; - this.transferredBytes += (bb.writerIndex() - bb.readerIndex()); - } - remote.channel().write(msg); - } - - @Override - public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { - remote.channel().flush(); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - NetworkErrorProxy.log.debug(cause.getMessage(), cause); - ctx.close(); - } -} - -class SendBackHandler implements ChannelInboundHandler { - private final ChannelHandlerContext target; - public long transferredBytes; - - public SendBackHandler(ChannelHandlerContext ctx) { - this.target = ctx; - } - - @Override - public void channelRegistered(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void channelActive(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - } - - public int messageSize(Object msg) { - if (msg instanceof ByteBuf) { - ByteBuf bb = (ByteBuf)msg; - return (bb.writerIndex() - bb.readerIndex()); - } - // unknown - return 0; - } - - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - this.transferredBytes += messageSize(msg); - this.target.write(msg); - } - - public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { - this.target.flush(); - } - - @Override - public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception { - } - - @Override - public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void handlerAdded(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - NetworkErrorProxy.log.debug(cause.getMessage(), cause); - this.target.close(); - } - -} - -class SwallowingHandler extends SendBackHandler { - private int skipStartingPos; - private int nrOfBytes; - - public SwallowingHandler(ChannelHandlerContext ctx, int skipStartingPos, int numberOfBytes) { - super(ctx); - this.skipStartingPos = skipStartingPos; - this.nrOfBytes = numberOfBytes; - } - - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof ByteBuf) { - ByteBuf bb = (ByteBuf)msg; - if (this.nrOfBytes > 0) { - if (this.transferredBytes >= this.skipStartingPos) { - bb.skipBytes(this.nrOfBytes); - this.nrOfBytes = 0; - } - else { - this.skipStartingPos -= messageSize(msg); - } - } - } - super.channelRead(ctx, msg); - } - -} - -class BitFlipHandler extends SendBackHandler { - private static final Logger log = LoggerFactory - .getLogger(BitFlipHandler.class); - - private int startingPos; - - public BitFlipHandler(ChannelHandlerContext ctx, int pos) { - super(ctx); - this.startingPos 
= pos; - } - - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof ByteBuf) { - ByteBuf bb = (ByteBuf)msg; - log.debug("FlipHandler. Got Buffer size: " + bb.readableBytes()); - if (this.startingPos >= 0) { - if (this.transferredBytes + bb.readableBytes() >= this.startingPos) { - int i = this.startingPos - (int)this.transferredBytes; - log.info("FlipHandler flips byte at offset " + (this.transferredBytes + i)); - byte b = (byte) (bb.getByte(i) ^ 0x01); - bb.setByte(i, b); - this.startingPos = -1; - } - } - } - super.channelRead(ctx, msg); - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTestUtils.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTestUtils.java deleted file mode 100644 index fd97821..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentTestUtils.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
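To make BitFlipHandler's offset arithmetic concrete: suppose flipByte(150) was configured, 100 bytes have already been forwarded, and the current buffer holds 80 readable bytes. Then transferredBytes + readableBytes = 180 >= 150, so the target byte sits in this buffer at index i = 150 - 100 = 50; that byte is XORed with 0x01 (its lowest bit is inverted in place) and startingPos drops to -1, ensuring exactly one byte of the stream is ever corrupted.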
- */ -package org.apache.jackrabbit.oak.plugins.segment; - -import static java.io.File.createTempFile; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.MAX_SEGMENT_SIZE; -import static org.apache.jackrabbit.oak.plugins.segment.Segment.RECORD_ALIGN_BITS; -import static org.junit.Assert.assertEquals; - -import java.io.File; -import java.io.IOException; -import java.util.Random; - -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; - -public final class SegmentTestUtils { - - private SegmentTestUtils() { } - - public static int newValidOffset(Random random) { - return random.nextInt(MAX_SEGMENT_SIZE >> RECORD_ALIGN_BITS) << RECORD_ALIGN_BITS; - } - - public static RecordId newRecordId(SegmentTracker factory, Random random) { - SegmentId id = factory.newDataSegmentId(); - RecordId r = new RecordId(id, newValidOffset(random)); - return r; - } - - public static void assertEqualStores(File d1, File d2) throws Exception { - FileStore f1 = FileStore.builder(d1).withMaxFileSize(1).withMemoryMapping(false).build(); - FileStore f2 = FileStore.builder(d2).withMaxFileSize(1).withMemoryMapping(false).build(); - try { - assertEquals(f1.getHead(), f2.getHead()); - } finally { - f1.close(); - f2.close(); - } - } - - public static void addTestContent(NodeStore store, String child) - throws CommitFailedException { - NodeBuilder builder = store.getRoot().builder(); - builder.child(child).setProperty("ts", System.currentTimeMillis()); - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - } - - public static File createTmpTargetDir(String name) throws IOException { - File f = createTempFile(name, "dir", new File("target")); - f.delete(); - f.mkdir(); - return f; - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BrokenNetworkTest.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BrokenNetworkTest.java deleted file mode 100644 index 7335782..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BrokenNetworkTest.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
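For reference, the assertEqualStores helper above is built on the deprecated FileStore.builder entry point. A hypothetical port to the oak-segment-tar API could look as follows; FileStoreBuilder and SegmentNodeStoreBuilders are the segment-tar replacements, the class name SegmentTarTestUtils is made up, and the roots of the two node stores are compared because segment-tar's FileStore exposes its head state differently:

    import static org.junit.Assert.assertEquals;

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
    import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

    public final class SegmentTarTestUtils {

        private SegmentTarTestUtils() { }

        public static void assertEqualStores(File d1, File d2) throws Exception {
            FileStore f1 = FileStoreBuilder.fileStoreBuilder(d1).withMaxFileSize(1).withMemoryMapping(false).build();
            FileStore f2 = FileStoreBuilder.fileStoreBuilder(d2).withMaxFileSize(1).withMemoryMapping(false).build();
            try {
                SegmentNodeStore s1 = SegmentNodeStoreBuilders.builder(f1).build();
                SegmentNodeStore s2 = SegmentNodeStoreBuilders.builder(f2).build();
                // content-based equality of the repository roots
                assertEquals(s1.getRoot(), s2.getRoot());
            } finally {
                f1.close();
                f2.close();
            }
        }
    }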
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.NetworkErrorProxy; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.addTestContent; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class BrokenNetworkTest extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndTwoClients(); - } - - @After - public void after() { - closeServerAndTwoClients(); - } - - @Test - public void testProxy() throws Exception { - useProxy(false); - } - - @Test - public void testProxySSL() throws Exception { - useProxy(true); - } - - @Test - public void testProxySkippedBytes() throws Exception { - useProxy(false, 100, 1, false); - } - - @Test - public void testProxySSLSkippedBytes() throws Exception { - useProxy(true, 400, 10, false); - } - - @Test - public void testProxySkippedBytesIntermediateChange() throws Exception { - useProxy(false, 100, 1, true); - } - - @Test - public void testProxySSLSkippedBytesIntermediateChange() throws Exception { - useProxy(true, 400, 10, true); - } - - @Test - public void testProxyFlippedStartByte() throws Exception { - useProxy(false, 0, 0, 0, false); - } - - @Test - public void testProxyFlippedStartByteSSL() throws Exception { - useProxy(true, 0, 0, 0, false); - } - - @Test - public void testProxyFlippedIntermediateByte() throws Exception { - useProxy(false, 0, 0, 150, false); - } - - @Test - public void testProxyFlippedIntermediateByteSSL() throws Exception { - useProxy(true, 0, 0, 560, false); - } - - @Test - public void testProxyFlippedEndByte() throws Exception { - useProxy(false, 0, 0, 220, false); - } - - @Test - public void testProxyFlippedEndByteSSL() throws Exception { - useProxy(true, 0, 0, 575, false); - } - - // private helper - - private void useProxy(boolean ssl) throws Exception { - useProxy(ssl, 0, 0, false); - } - - private void useProxy(boolean ssl, int skipPosition, int skipBytes, boolean intermediateChange) throws Exception { - useProxy(ssl, skipPosition, skipBytes, -1, intermediateChange); - } - - private void useProxy(boolean ssl, int skipPosition, int skipBytes, int flipPosition, boolean intermediateChange) throws Exception { - NetworkErrorProxy p = new NetworkErrorProxy(proxyPort, LOCALHOST, port); - p.skipBytes(skipPosition, skipBytes); - p.flipByte(flipPosition); - p.run(); - - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS, ssl); - server.start(); - addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... 
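The useProxy helpers in this test exercise the full lifecycle of the NetworkErrorProxy deleted above. Condensed to its essentials (port and host names are illustrative; a real test configures either a skip or a flip, not both):

    NetworkErrorProxy p = new NetworkErrorProxy(proxyPort, "127.0.0.1", serverPort);
    p.skipBytes(100, 1);   // swallow 1 byte once stream offset 100 is reached, or:
    p.flipByte(150);       // invert the low bit of the byte at stream offset 150
    p.run();               // listen on proxyPort and forward to serverPort
    // first sync through the proxy fails because the stream was corrupted
    p.reset();             // replace the tampering ForwardHandler, reconnect cleanly
    // second sync succeeds
    p.close();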
- - StandbyClient cl = newStandbyClient(storeC, proxyPort, ssl); - cl.run(); - - try { - if (skipBytes > 0 || flipPosition >= 0) { - assertFalse("stores are not expected to be equal", storeS.getHead().equals(storeC.getHead())); - assertEquals(storeC2.getHead(), storeC.getHead()); - - p.reset(); - if (intermediateChange) { - addTestContent(store, "server2"); - storeS.flush(); - } - cl.run(); - } - assertEquals(storeS.getHead(), storeC.getHead()); - } finally { - server.close(); - cl.close(); - p.close(); - } - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BulkTest.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BulkTest.java deleted file mode 100644 index a680209..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/BulkTest.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.StandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import javax.management.MBeanServer; -import javax.management.ObjectName; - -import java.lang.management.ManagementFactory; -import java.util.Set; - -import static junit.framework.Assert.assertNotNull; -import static junit.framework.Assert.assertTrue; -import static org.junit.Assert.assertEquals; - -public class BulkTest extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void test100Nodes() throws Exception { - test(100, 1, 1, 3000, 3100); - } - - @Test - public void test1000Nodes() throws Exception { - test(1000, 1, 1, 53000, 55000); - } - - @Test - public void test10000Nodes() throws Exception { - test(10000, 1, 1, 245000, 246000); - } - - @Test - public void test100000Nodes() throws Exception { - test(100000, 9, 9, 2210000, 2220000); - } - - @Test - public void test1MillionNodes() throws Exception { - test(1000000, 87, 87, 22700000, 22800000); - } - - @Test - public void test1MillionNodesUsingSSL() throws Exception { - test(1000000, 87, 87, 22700000, 22800000, true); - } - -/* - @Test - public void 
test10MillionNodes() throws Exception { - test(10000000, 856, 856, 223000000, 224000000); - } -*/ - - // private helper - - private void test(int number, int minExpectedSegments, int maxExpectedSegments, long minExpectedBytes, long maxExpectedBytes) throws Exception { - test(number, minExpectedSegments, maxExpectedSegments, minExpectedBytes, maxExpectedBytes, false); - } - - private void test(int number, int minExpectedSegments, int maxExpectedSegments, long minExpectedBytes, long maxExpectedBytes, - boolean useSSL) throws Exception { - NodeStore store = SegmentNodeStore.builder(storeS).build(); - NodeBuilder rootbuilder = store.getRoot().builder(); - NodeBuilder b = rootbuilder.child("store"); - for (int j=0; j<=number / 1000; j++) { - NodeBuilder builder = b.child("Folder#" + j); - for (int i = 0; i <(number < 1000 ? number : 1000); i++) { - builder.child("Test#" + i).setProperty("ts", System.currentTimeMillis()); - } - } - store.merge(rootbuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - storeS.flush(); - - final StandbyServer server = new StandbyServer(port, storeS, useSSL); - server.start(); - - System.setProperty(StandbyClient.CLIENT_ID_PROPERTY_NAME, "Bar"); - StandbyClient cl = newStandbyClient(storeC, port, useSSL); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - ObjectName status = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=*"); - ObjectName clientStatus = new ObjectName(cl.getMBeanName()); - ObjectName serverStatus = new ObjectName(server.getMBeanName()); - - long start = System.currentTimeMillis(); - cl.run(); - - try { - Set instances = jmxServer.queryNames(status, null); - assertEquals(3, instances.size()); - - ObjectName connectionStatus = null; - for (ObjectName s : instances) { - if (!s.equals(clientStatus) && !s.equals(serverStatus)) connectionStatus = s; - } - assertNotNull(connectionStatus); - - long segments = ((Long)jmxServer.getAttribute(connectionStatus, "TransferredSegments")).longValue(); - long bytes = ((Long)jmxServer.getAttribute(connectionStatus, "TransferredSegmentBytes")).longValue(); - - System.out.println("did transfer " + segments + " segments with " + bytes + " bytes in " + (System.currentTimeMillis() - start) / 1000 + " seconds."); - - assertEquals(storeS.getHead(), storeC.getHead()); - - //compare(segments, "segment", minExpectedSegments, maxExpectedSegments); - //compare(bytes, "byte", minExpectedBytes, maxExpectedBytes); - - } finally { - server.close(); - cl.close(); - } - } - - private void compare(long current, String unit, long expectedMin, long expectedMax) { - assertTrue("current number of " + unit + "s (" + current + ") is less than minimum expected: " + expectedMin, current >= expectedMin); - assertTrue("current number of " + unit + "s (" + current + ") is bigger than maximum expected: " + expectedMax, current <= expectedMax); - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/DataStoreTestBase.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/DataStoreTestBase.java deleted file mode 100644 index c67b765..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/DataStoreTestBase.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.util.Random; - -import com.google.common.io.ByteStreams; -import org.apache.jackrabbit.core.data.FileDataStore; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore; -import org.apache.jackrabbit.oak.plugins.segment.NetworkErrorProxy; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.Before; -import org.junit.Test; - -public class DataStoreTestBase extends TestBase { - - private static final int MB = 1024 * 1024; - - protected boolean storesCanBeEqual = false; - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - protected FileStore setupFileDataStore(File d, String path) throws Exception { - FileDataStore fds = new FileDataStore(); - fds.setMinRecordLength(4092); - fds.init(path); - DataStoreBlobStore blobStore = new DataStoreBlobStore(fds); - return FileStore.builder(d) - .withMaxFileSize(1) - .withMemoryMapping(false) - .withNoCache() - .withBlobStore(blobStore) - .build(); - } - - protected byte[] addTestContent(NodeStore store, String child, int size) - throws CommitFailedException, IOException { - NodeBuilder builder = store.getRoot().builder(); - builder.child(child).setProperty("ts", System.currentTimeMillis()); - - byte[] data = new byte[size]; - new Random().nextBytes(data); - Blob blob = store.createBlob(new ByteArrayInputStream(data)); - - builder.child(child).setProperty("testBlob", blob); - - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return data; - } - - @Test - public void testSync() throws Exception { - final int blobSize = 5 * MB; - FileStore primary = getPrimary(); - FileStore secondary = getSecondary(); - - NodeStore store = SegmentNodeStore.builder(primary).build(); - final StandbyServer server = new StandbyServer(port, primary); - server.start(); - byte[] data = addTestContent(store, "server", blobSize); - primary.flush(); - - StandbyClient cl 
= newStandbyClient(secondary); - cl.run(); - - try { - assertEquals(primary.getHead(), secondary.getHead()); - } finally { - server.close(); - cl.close(); - } - - assertTrue(primary.size() < MB); - assertTrue(secondary.size() < MB); - - PropertyState ps = secondary.getHead().getChildNode("root") - .getChildNode("server").getProperty("testBlob"); - assertNotNull(ps); - assertEquals(Type.BINARY.tag(), ps.getType().tag()); - Blob b = ps.getValue(Type.BINARY); - assertEquals(blobSize, b.length()); - byte[] testData = new byte[blobSize]; - ByteStreams.readFully(b.getNewStream(), testData); - assertArrayEquals(data, testData); - } - - /** - * See OAK-4969 - */ - @Test - public void testSyncUpdatedBinaryProperty() throws Exception { - final int blobSize = 5 * MB; - FileStore primary = getPrimary(); - FileStore secondary = getSecondary(); - - NodeStore store = SegmentNodeStore.builder(primary).build(); - - try ( - StandbyClient client = newStandbyClient(secondary); - StandbyServer server = new StandbyServer(port, primary); - ) { - server.start(); - - addTestContent(store, "server", blobSize); - primary.flush(); - client.run(); - assertEquals(primary.getHead(), secondary.getHead()); - - addTestContent(store, "server", blobSize); - primary.flush(); - client.run(); - assertEquals(primary.getHead(), secondary.getHead()); - - } - - } - - @Test - public void testProxySkippedBytes() throws Exception { - useProxy(100, 1, -1, false); - } - - @Test - public void testProxySkippedBytesIntermediateChange() throws Exception { - useProxy(100, 1, -1, true); - } - - @Test - public void testProxyFlippedStartByte() throws Exception { - useProxy(0, 0, 0, false); - } - - @Test - public void testProxyFlippedIntermediateByte() throws Exception { - useProxy(0, 0, 150, false); - } - - @Test - public void testProxyFlippedIntermediateByte2() throws Exception { - useProxy(0, 0, 150000, false); - } - - @Test - public void testProxyFlippedIntermediateByteChange() throws Exception { - useProxy(0, 0, 150, true); - } - - @Test - public void testProxyFlippedIntermediateByteChange2() throws Exception { - useProxy(0, 0, 150000, true); - } - - private void useProxy(int skipPosition, int skipBytes, int flipPosition, boolean intermediateChange) throws Exception { - int blobSize = 5 * MB; - FileStore primary = getPrimary(); - FileStore secondary = getSecondary(); - - NetworkErrorProxy p = new NetworkErrorProxy(proxyPort, LOCALHOST, port); - p.skipBytes(skipPosition, skipBytes); - p.flipByte(flipPosition); - p.run(); - - NodeStore store = SegmentNodeStore.builder(primary).build(); - final StandbyServer server = new StandbyServer(port, primary); - server.start(); - byte[] data = addTestContent(store, "server", blobSize); - primary.flush(); - - StandbyClient cl = newStandbyClient(secondary, proxyPort); - cl.run(); - - try { - if (skipBytes > 0 || flipPosition >= 0) { - if (!this.storesCanBeEqual) { - assertFalse("stores are not expected to be equal", primary.getHead().equals(secondary.getHead())); - } - p.reset(); - if (intermediateChange) { - blobSize = 2 * MB; - data = addTestContent(store, "server", blobSize); - primary.flush(); - } - cl.run(); - } - assertEquals(primary.getHead(), secondary.getHead()); - } finally { - server.close(); - cl.close(); - p.close(); - } - - assertTrue(primary.size() < MB); - assertTrue(secondary.size() < MB); - - PropertyState ps = secondary.getHead().getChildNode("root") - .getChildNode("server").getProperty("testBlob"); - assertNotNull(ps); - assertEquals(Type.BINARY.tag(), ps.getType().tag()); - Blob b = 
ps.getValue(Type.BINARY); - assertEquals(blobSize, b.length()); - byte[] testData = new byte[blobSize]; - ByteStreams.readFully(b.getNewStream(), testData); - assertArrayEquals(data, testData); - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalPrivateStoreIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalPrivateStoreIT.java deleted file mode 100644 index 1dc3d96..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalPrivateStoreIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.createTmpTargetDir; - -import java.io.File; -import java.io.IOException; - -import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.junit.After; - - -public class ExternalPrivateStoreIT extends DataStoreTestBase { - - private File primaryStore; - private File secondaryStore; - - @After - public void after() { - closeServerAndClient(); - try { - FileUtils.deleteDirectory(primaryStore); - FileUtils.deleteDirectory(secondaryStore); - } catch (IOException e) { - } - } - - @Override - protected FileStore setupPrimary(File d) throws Exception { - primaryStore = createTmpTargetDir("ExternalStoreITPrimary"); - return setupFileDataStore(d, primaryStore.getAbsolutePath()); - } - - @Override - protected FileStore setupSecondary(File d) throws Exception { - secondaryStore = createTmpTargetDir("ExternalStoreITSecondary"); - return setupFileDataStore(d, secondaryStore.getAbsolutePath()); - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalSharedStoreIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalSharedStoreIT.java deleted file mode 100644 index 713f20b..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/ExternalSharedStoreIT.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.createTmpTargetDir; - -import java.io.File; -import java.io.IOException; - -import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.junit.After; - -public class ExternalSharedStoreIT extends DataStoreTestBase { - private File externalStore; - - public ExternalSharedStoreIT() { - super(); - this.storesCanBeEqual = true; - } - - @After - public void after() { - closeServerAndClient(); - try { - FileUtils.deleteDirectory(externalStore); - } catch (IOException e) { - } - } - - @Override - protected FileStore setupPrimary(File d) throws Exception { - externalStore = createTmpTargetDir("ExternalCommonStoreIT"); - return setupFileDataStore(d, externalStore.getAbsolutePath()); - } - - @Override - protected FileStore setupSecondary(File d) throws Exception { - return setupFileDataStore(d, externalStore.getAbsolutePath()); - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverIPRangeTest.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverIPRangeTest.java deleted file mode 100644 index dbbc4c3..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverIPRangeTest.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
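Both IT classes above lean on setupFileDataStore from DataStoreTestBase, which wires a FileDataStore (minimum record length 4092) behind a DataStoreBlobStore. A hypothetical equivalent against the oak-segment-tar builder, for comparison; the old withNoCache() switch has no one-line counterpart on FileStoreBuilder and is omitted here:

    import java.io.File;

    import org.apache.jackrabbit.core.data.FileDataStore;
    import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
    import org.apache.jackrabbit.oak.segment.file.FileStore;
    import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;

    static FileStore setupFileDataStore(File segmentDir, String dataStorePath) throws Exception {
        FileDataStore fds = new FileDataStore();
        fds.setMinRecordLength(4092);      // binaries above this size go to the data store
        fds.init(dataStorePath);
        DataStoreBlobStore blobStore = new DataStoreBlobStore(fds);
        return FileStoreBuilder.fileStoreBuilder(segmentDir)
                .withMaxFileSize(1)        // 1 MB tar files, as in the deleted helper
                .withMemoryMapping(false)
                .withBlobStore(blobStore)
                .build();
    }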
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.addTestContent; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class FailoverIPRangeTest extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void testFailoverAllClients() throws Exception { - createTestWithConfig(null, true); - } - - @Test - public void testFailoverLocalClient() throws Exception { - createTestWithConfig(new String[]{"127.0.0.1"}, true); - } - - @Test - public void testFailoverLocalClientUseIPv6() throws Exception { - if (!noDualStackSupport) { - createTestWithConfig("::1", new String[]{"::1"}, true); - } - } - - @Test - public void testFailoverWrongClient() throws Exception { - createTestWithConfig(new String[]{"127.0.0.2"}, false); - } - - @Test - public void testFailoverWrongClientIPv6() throws Exception { - if (!noDualStackSupport) { - createTestWithConfig(new String[]{"::2"}, false); - } - } - - @Test - public void testFailoverLocalhost() throws Exception { - createTestWithConfig(new String[]{"localhost"}, true); - } - - @Test - public void testFailoverInvalidName() throws Exception { - createTestWithConfig(new String[]{"foobar"}, false); - } - - @Test - public void testFailoverValidIPRangeStart() throws Exception { - createTestWithConfig(new String[]{"127.0.0.1-127.0.0.2"}, true); - } - - @Test - public void testFailoverValidIPRangeEnd() throws Exception { - createTestWithConfig(new String[]{"127.0.0.0-127.0.0.1"}, true); - } - - @Test - public void testFailoverValidIPRange() throws Exception { - createTestWithConfig(new String[]{"127.0.0.0-127.0.0.2"}, true); - } - - @Test - public void testFailoverInvalidRange() throws Exception { - createTestWithConfig(new String[]{"127.0.0.2-127.0.0.1"}, false); - } - - @Test - public void testFailoverCorrectList() throws Exception { - createTestWithConfig(new String[]{"foobar","127-128","126.0.0.1", "127.0.0.0-127.255.255.255"}, true); - } - - @Test - public void testFailoverCorrectListIPv6() throws Exception { - if (!noDualStackSupport) { - createTestWithConfig(new String[]{"foobar", "122-126", "::1", "126.0.0.1", "127.0.0.0-127.255.255.255"}, true); - } - } - - @Test - public void testFailoverWrongList() throws Exception { - createTestWithConfig(new String[]{"foobar", "126.0.0.1", "::2", "128.0.0.1-255.255.255.255", "128.0.0.0-127.255.255.255"}, false); - } - - @Test - public void testFailoverCorrectListUseIPv6() throws Exception { - if (!noDualStackSupport) { - createTestWithConfig("::1", new String[]{"foobar","127-128", "0:0:0:0:0:0:0:1", "126.0.0.1", "127.0.0.0-127.255.255.255"}, true); - } - } - - @Test - public void testFailoverCorrectListIPv6UseIPv6() throws Exception { - if (!noDualStackSupport) { - createTestWithConfig("::1", new String[]{"foobar", "122-126", "::1", "126.0.0.1", "127.0.0.0-127.255.255.255"}, true); - } - } - - @Test - public void testFailoverWrongListUseIPv6() throws Exception { - if (!noDualStackSupport) { - 
createTestWithConfig("::1", new String[]{"foobar", "126.0.0.1", "::2", "128.0.0.1-255.255.255.255", "128.0.0.0-127.255.255.255"}, false); - } - } - - private void createTestWithConfig(String[] ipRanges, boolean expectedToWork) throws Exception { - createTestWithConfig("127.0.0.1", ipRanges, expectedToWork); - } - - private void createTestWithConfig(String host, String[] ipRanges, boolean expectedToWork) throws Exception { - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS, ipRanges); - server.start(); - addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... - - StandbyClient cl = new StandbyClient(host, port, storeC, false, timeout, false); - cl.run(); - - try { - if (expectedToWork) { - assertEquals(storeS.getHead(), storeC.getHead()); - } - else { - assertFalse("stores are equal but shouldn't!", storeS.getHead().equals(storeC.getHead())); - } - } finally { - server.close(); - cl.close(); - } - - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverMultipleClientsTestIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverMultipleClientsTestIT.java deleted file mode 100644 index 6a7ad0f..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverMultipleClientsTestIT.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertEquals; - -public class FailoverMultipleClientsTestIT extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndTwoClients(); - } - - @After - public void after() { - closeServerAndTwoClients(); - } - - @Test - public void testMultipleClients() throws Exception { - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS); - server.start(); - SegmentTestUtils.addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... 
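Stepping back to FailoverIPRangeTest above: each entry in the server's allow-list is either a single host name/address or an inclusive IPv4 range written "start-end". A rough, stand-alone illustration of that matching rule, not StandbyServer's actual implementation:

    import java.net.InetAddress;

    final class IpRangeSketch {

        // True if the client address matches the entry; unresolvable entries
        // (such as "foobar") and inverted ranges never match.
        static boolean matches(String entry, InetAddress client) {
            try {
                int dash = entry.indexOf('-');
                if (dash < 0) {
                    return client.equals(InetAddress.getByName(entry));
                }
                long lo = ipv4(InetAddress.getByName(entry.substring(0, dash)));
                long hi = ipv4(InetAddress.getByName(entry.substring(dash + 1)));
                long ip = ipv4(client);
                return lo >= 0 && hi >= 0 && ip >= 0 && lo <= ip && ip <= hi;
            } catch (Exception e) {
                return false;
            }
        }

        private static long ipv4(InetAddress a) {
            byte[] b = a.getAddress();
            if (b.length != 4) {
                return -1;  // not an IPv4 address
            }
            return ((b[0] & 0xFFL) << 24) | ((b[1] & 0xFFL) << 16)
                 | ((b[2] & 0xFFL) << 8) | (b[3] & 0xFFL);
        }
    }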
- - StandbyClient cl1 = newStandbyClient(storeC); - StandbyClient cl2 = newStandbyClient(storeC2); - - try { - assertFalse("first client has invalid initial store!", storeS.getHead().equals(storeC.getHead())); - assertFalse("second client has invalid initial store!", storeS.getHead().equals(storeC2.getHead())); - assertEquals(storeC.getHead(), storeC2.getHead()); - - cl1.run(); - cl2.run(); - - assertEquals(storeS.getHead(), storeC.getHead()); - assertEquals(storeS.getHead(), storeC2.getHead()); - - cl1.stop(); - SegmentTestUtils.addTestContent(store, "test"); - storeS.flush(); - cl1.run(); - cl2.run(); - - assertEquals(storeS.getHead(), storeC2.getHead()); - assertFalse("first client updated in stopped state!", storeS.getHead().equals(storeC.getHead())); - - cl1.start(); - cl1.run(); - assertEquals(storeS.getHead(), storeC.getHead()); - } finally { - server.close(); - cl1.close(); - cl2.close(); - } - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverSslTestIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverSslTestIT.java deleted file mode 100644 index 430456a..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/FailoverSslTestIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.addTestContent; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class FailoverSslTestIT extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void testFailoverSecure() throws Exception { - - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS, true); - server.start(); - addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... 
- - StandbyClient cl = newStandbyClient(storeC, port, true); - cl.run(); - - try { - assertEquals(storeS.getHead(), storeC.getHead()); - } finally { - server.close(); - cl.close(); - } - } - - @Test - public void testFailoverSecureServerPlainClient() throws Exception { - - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS, true); - server.start(); - addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... - - StandbyClient cl = newStandbyClient(storeC); - cl.run(); - - try { - assertFalse("stores are equal but shouldn't!", storeS.getHead().equals(storeC.getHead())); - } finally { - server.close(); - cl.close(); - } - } - - @Test - public void testFailoverPlainServerSecureClient() throws Exception { - - NodeStore store = SegmentNodeStore.builder(storeS).build(); - final StandbyServer server = new StandbyServer(port, storeS); - server.start(); - addTestContent(store, "server"); - storeS.flush(); // this speeds up the test a little bit... - - StandbyClient cl = newStandbyClient(storeC, port, true); - cl.run(); - - try { - assertFalse("stores are equal but shouldn't!", storeS.getHead().equals(storeC.getHead())); - } finally { - server.close(); - cl.close(); - } - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/MBeanTest.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/MBeanTest.java deleted file mode 100644 index badac41..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/MBeanTest.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.jmx.StandbyStatusMBean; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; - -import javax.management.MBeanServer; -import javax.management.ObjectName; - -import java.lang.management.ManagementFactory; -import java.util.Set; - -import static junit.framework.Assert.*; - -public class MBeanTest extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void testServerEmptyConfig() throws Exception { - final StandbyServer server = new StandbyServer(this.port, this.storeS); - server.start(); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - ObjectName status = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=*"); - try { - Set instances = jmxServer.queryNames(status, null); - assertEquals(1, instances.size()); - status = instances.toArray(new ObjectName[0])[0]; - assertEquals(new ObjectName(server.getMBeanName()), status); - assertTrue(jmxServer.isRegistered(status)); - - assertEquals("primary", jmxServer.getAttribute(status, "Mode")); - String m = jmxServer.getAttribute(status, "Status").toString(); - if (!m.equals(StandbyStatusMBean.STATUS_STARTING) && !m.equals("channel unregistered")) - fail("unexpected Status " + m); - - assertEquals(StandbyStatusMBean.STATUS_STARTING, jmxServer.getAttribute(status, "Status")); - assertEquals(true, jmxServer.getAttribute(status, "Running")); - jmxServer.invoke(status, "stop", null, null); - assertEquals(false, jmxServer.getAttribute(status, "Running")); - assertEquals(StandbyStatusMBean.STATUS_STOPPED, jmxServer.getAttribute(status, "Status")); - jmxServer.invoke(status, "start", null, null); - - assertEquals(true, jmxServer.getAttribute(status, "Running")); - assertEquals(StandbyStatusMBean.STATUS_RUNNING, jmxServer.getAttribute(status, "Status")); - } finally { - server.close(); - } - - assertTrue(!jmxServer.isRegistered(status)); - } - - @Test - public void testClientEmptyConfigNoServer() throws Exception { - final StandbyClient client = newStandbyClient(storeC); - client.start(); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - ObjectName status = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=*"); - try { - Set instances = jmxServer.queryNames(status, null); - assertEquals(1, instances.size()); - status = instances.toArray(new ObjectName[0])[0]; - assertEquals(new ObjectName(client.getMBeanName()), status); - assertTrue(jmxServer.isRegistered(status)); - - String m = jmxServer.getAttribute(status, "Mode").toString(); - if (!m.startsWith("client: ")) fail("unexpected mode " + m); - - assertEquals("0", jmxServer.getAttribute(status, "FailedRequests").toString()); - assertEquals("-1", jmxServer.getAttribute(status, "SecondsSinceLastSuccess").toString()); - - assertEquals(StandbyStatusMBean.STATUS_INITIALIZING, jmxServer.getAttribute(status, "Status")); - - assertEquals(false, jmxServer.getAttribute(status, "Running")); - jmxServer.invoke(status, "stop", null, null); - assertEquals(false, jmxServer.getAttribute(status, "Running")); - assertEquals(StandbyStatusMBean.STATUS_STOPPED, jmxServer.getAttribute(status, "Status")); - 
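A pattern repeated throughout MBeanTest: every standby component registers under StandbyStatusMBean.JMX_NAME with a distinguishing id, so a wildcard ObjectName query finds server, client, and per-connection beans alike. The lookup, condensed into a snippet (exception handling elided, not a compilable unit on its own):

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    MBeanServer jmx = ManagementFactory.getPlatformMBeanServer();
    ObjectName pattern = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=*");
    for (ObjectName name : jmx.queryNames(pattern, null)) {
        // attributes exposed include "Mode", "Status", "Running", and, on
        // client beans, "FailedRequests" and "SecondsSinceLastSuccess"
        System.out.println(name + " -> " + jmx.getAttribute(name, "Status"));
    }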
jmxServer.invoke(status, "start", null, null); - assertEquals(false, jmxServer.getAttribute(status, "Running")); - assertEquals(StandbyStatusMBean.STATUS_STOPPED, jmxServer.getAttribute(status, "Status")); - } finally { - client.close(); - } - - assertTrue(!jmxServer.isRegistered(status)); - } - - @Test - public void testClientNoServer() throws Exception { - System.setProperty(StandbyClient.CLIENT_ID_PROPERTY_NAME, "Foo"); - final StandbyClient client = newStandbyClient(storeC); - client.start(); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - ObjectName status = new ObjectName(client.getMBeanName()); - try { - assertTrue(jmxServer.isRegistered(status)); - assertEquals("client: Foo", jmxServer.getAttribute(status, "Mode")); - - assertEquals("1", jmxServer.getAttribute(status, "FailedRequests").toString()); - assertEquals("-1", jmxServer.getAttribute(status, "SecondsSinceLastSuccess").toString()); - - assertEquals("1", jmxServer.invoke(status, "calcFailedRequests", null, null).toString()); - assertEquals("-1", jmxServer.invoke(status, "calcSecondsSinceLastSuccess", null, null).toString()); - } finally { - client.close(); - } - - assertTrue(!jmxServer.isRegistered(status)); - } - - @Test - @Ignore("OAK-2086") - public void testClientAndServerEmptyConfig() throws Exception { - final StandbyServer server = new StandbyServer(port, this.storeS); - server.start(); - - System.setProperty(StandbyClient.CLIENT_ID_PROPERTY_NAME, "Bar"); - final StandbyClient client = newStandbyClient(storeC); - client.start(); - - final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer(); - ObjectName status = new ObjectName(StandbyStatusMBean.JMX_NAME + ",id=*"); - ObjectName clientStatus = new ObjectName(client.getMBeanName()); - ObjectName serverStatus = new ObjectName(server.getMBeanName()); - try { - Set instances = jmxServer.queryNames(status, null); - assertEquals(3, instances.size()); - - ObjectName connectionStatus = null; - for (ObjectName s : instances) { - if (!s.equals(clientStatus) && !s.equals(serverStatus)) connectionStatus = s; - } - assertNotNull(connectionStatus); - - assertTrue(jmxServer.isRegistered(clientStatus)); - assertTrue(jmxServer.isRegistered(serverStatus)); - assertTrue(jmxServer.isRegistered(connectionStatus)); - - String m = jmxServer.getAttribute(clientStatus, "Mode").toString(); - if (!m.startsWith("client: ")) fail("unexpected mode " + m); - - assertEquals("master", jmxServer.getAttribute(serverStatus, "Mode")); - - assertEquals(true, jmxServer.getAttribute(serverStatus, "Running")); - assertEquals(true, jmxServer.getAttribute(clientStatus, "Running")); - - assertEquals("0", jmxServer.getAttribute(clientStatus, "FailedRequests").toString()); - assertEquals("0", jmxServer.getAttribute(clientStatus, "SecondsSinceLastSuccess").toString()); - assertEquals("0", jmxServer.invoke(clientStatus, "calcFailedRequests", null, null).toString()); - assertEquals("0", jmxServer.invoke(clientStatus, "calcSecondsSinceLastSuccess", null, null).toString()); - - Thread.sleep(1000); - - assertEquals("0", jmxServer.getAttribute(clientStatus, "FailedRequests").toString()); - assertEquals("1", jmxServer.getAttribute(clientStatus, "SecondsSinceLastSuccess").toString()); - assertEquals("0", jmxServer.invoke(clientStatus, "calcFailedRequests", null, null).toString()); - assertEquals("1", jmxServer.invoke(clientStatus, "calcSecondsSinceLastSuccess", null, null).toString()); - - assertEquals(new Long(2), jmxServer.getAttribute(connectionStatus, 
"TransferredSegments")); - assertEquals(new Long(128), jmxServer.getAttribute(connectionStatus, "TransferredSegmentBytes")); - - // stop the master - jmxServer.invoke(serverStatus, "stop", null, null); - assertEquals(false, jmxServer.getAttribute(serverStatus, "Running")); - m = jmxServer.getAttribute(serverStatus, "Status").toString(); - if (!m.equals(StandbyStatusMBean.STATUS_STOPPED) && !m.equals("channel unregistered")) - fail("unexpected Status" + m); - - // restart the master - jmxServer.invoke(serverStatus, "start", null, null); - assertEquals(true, jmxServer.getAttribute(serverStatus, "Running")); - assertEquals(true, jmxServer.getAttribute(clientStatus, "Running")); - m = jmxServer.getAttribute(serverStatus, "Status").toString(); - if (!m.equals(StandbyStatusMBean.STATUS_STARTING) && !m.equals("channel unregistered")) - fail("unexpected Status" + m); - - // stop the slave - jmxServer.invoke(clientStatus, "stop", null, null); - assertEquals(true, jmxServer.getAttribute(serverStatus, "Running")); - assertEquals(false, jmxServer.getAttribute(clientStatus, "Running")); - assertEquals(StandbyStatusMBean.STATUS_STOPPED, jmxServer.getAttribute(clientStatus, "Status")); - - // restart the slave - jmxServer.invoke(clientStatus, "start", null, null); - assertEquals(true, jmxServer.getAttribute(clientStatus, "Running")); - assertEquals(StandbyStatusMBean.STATUS_RUNNING, jmxServer.getAttribute(clientStatus, "Status")); - - } finally { - client.close(); - server.close(); - } - - assertTrue(!jmxServer.isRegistered(clientStatus)); - assertTrue(!jmxServer.isRegistered(serverStatus)); - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/RecoverTestIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/RecoverTestIT.java deleted file mode 100644 index 0deec9b..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/RecoverTestIT.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - - -import org.apache.jackrabbit.oak.plugins.segment.DebugSegmentStore; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.addTestContent; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -public class RecoverTestIT extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void testBrokenConnection() throws Exception { - - NodeStore store = SegmentNodeStore.builder(storeS).build(); - DebugSegmentStore s = new DebugSegmentStore(storeS); - addTestContent(store, "server"); - storeS.flush(); - - final StandbyServer server = new StandbyServer(port, s); - s.createReadErrors = true; - server.start(); - - StandbyClient cl = newStandbyClient(storeC); - cl.run(); - - try { - assertFalse("store are not expected to be equal", storeS.getHead().equals(storeC.getHead())); - s.createReadErrors = false; - cl.run(); - assertEquals(storeS.getHead(), storeC.getHead()); - } finally { - server.close(); - cl.close(); - } - - } - - @Test - public void testLocalChanges() throws Exception { - - NodeStore store = SegmentNodeStore.builder(storeC).build(); - addTestContent(store, "client"); - - final StandbyServer server = new StandbyServer(port, storeS); - server.start(); - store = SegmentNodeStore.builder(storeS).build(); - addTestContent(store, "server"); - storeS.flush(); - - StandbyClient cl = newStandbyClient(storeC); - try { - assertFalse("stores are not expected to be equal", storeS.getHead().equals(storeC.getHead())); - cl.run(); - assertEquals(storeS.getHead(), storeC.getHead()); - } finally { - server.close(); - cl.close(); - } - - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTest.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTest.java deleted file mode 100644 index 67bc062..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.Random; - -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.io.ByteStreams; - -public class StandbyTest extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - @Test - public void testSync() throws Exception { - final int mb = 1 * 1024 * 1024; - final int blobSize = 5 * mb; - FileStore primary = getPrimary(); - FileStore secondary = getSecondary(); - - NodeStore store = SegmentNodeStore.builder(primary).build(); - final StandbyServer server = new StandbyServer(port, primary); - server.start(); - byte[] data = addTestContent(store, "server", blobSize, 150); - primary.flush(); - - StandbyClient cl = newStandbyClient(secondary); - cl.run(); - - try { - assertEquals(primary.getHead(), secondary.getHead()); - } finally { - server.close(); - cl.close(); - } - - assertTrue(primary.size() > blobSize); - assertTrue(secondary.size() > blobSize); - - PropertyState ps = secondary.getHead().getChildNode("root") - .getChildNode("server").getProperty("testBlob"); - assertNotNull(ps); - assertEquals(Type.BINARY.tag(), ps.getType().tag()); - Blob b = ps.getValue(Type.BINARY); - assertEquals(blobSize, b.length()); - - byte[] testData = new byte[blobSize]; - ByteStreams.readFully(b.getNewStream(), testData); - assertArrayEquals(data, testData); - - } - - private static byte[] addTestContent(NodeStore store, String child, int size, int dataNodes) - throws CommitFailedException, IOException { - NodeBuilder builder = store.getRoot().builder(); - NodeBuilder content = builder.child(child); - content.setProperty("ts", System.currentTimeMillis()); - - byte[] data = new byte[size]; - new Random().nextBytes(data); - Blob blob = store.createBlob(new ByteArrayInputStream(data)); - content.setProperty("testBlob", blob); - - for (int i = 0; i < dataNodes; i++) { - NodeBuilder c = content.child("c" + i); - for (int j = 0; j < 1000; j++) { - c.setProperty("p" + i, "v" + i); - } - } - - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return data; - } -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTestIT.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTestIT.java deleted file mode 100644 index 49ccb64..0000000 --- 
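Two notes on the file above before the next deletion. First, its addTestContent helper runs the inner loop a thousand times but sets c.setProperty("p" + i, "v" + i), so each data node gets a single property rewritten a thousand times; j was presumably intended. Second, the blob round trip it verifies is plain NodeStore API and is unchanged by the migration. A minimal sketch on a MemoryNodeStore, with the inner loop keyed the way the helper presumably meant:

    import java.io.ByteArrayInputStream;
    import java.util.Arrays;
    import java.util.Random;

    import org.apache.jackrabbit.oak.api.Blob;
    import org.apache.jackrabbit.oak.api.Type;
    import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    import com.google.common.io.ByteStreams;

    public class BlobRoundTripSketch {

        public static void main(String[] args) throws Exception {
            NodeStore store = new MemoryNodeStore();

            byte[] data = new byte[5 * 1024];
            new Random().nextBytes(data);

            // Build a content tree with an attached blob, then merge it in.
            NodeBuilder builder = store.getRoot().builder();
            NodeBuilder content = builder.child("server");
            content.setProperty("testBlob", store.createBlob(new ByteArrayInputStream(data)));
            for (int i = 0; i < 10; i++) {
                NodeBuilder c = content.child("c" + i);
                for (int j = 0; j < 10; j++) {
                    c.setProperty("p" + j, "v" + j); // keyed by j, unlike the deleted helper
                }
            }
            store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);

            // Read the blob back and compare byte for byte, as testSync does.
            Blob b = store.getRoot().getChildNode("server")
                    .getProperty("testBlob").getValue(Type.BINARY);
            byte[] copy = new byte[(int) b.length()];
            ByteStreams.readFully(b.getNewStream(), copy);
            System.out.println("round trip ok: " + Arrays.equals(data, copy));
        }
    }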
oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/StandbyTestIT.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.Random; - -import org.apache.commons.io.FileUtils; -import org.apache.jackrabbit.oak.api.Blob; -import org.apache.jackrabbit.oak.api.CommitFailedException; -import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.api.Type; -import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer; -import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.commit.EmptyHook; -import org.apache.jackrabbit.oak.spi.state.NodeBuilder; -import org.apache.jackrabbit.oak.spi.state.NodeStore; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.google.common.io.ByteStreams; - -public class StandbyTestIT extends TestBase { - - @Before - public void setUp() throws Exception { - setUpServerAndClient(); - } - - @After - public void after() { - closeServerAndClient(); - } - - private static byte[] addTestContent(NodeStore store, String child, int size, int dataNodes) - throws CommitFailedException, IOException { - NodeBuilder builder = store.getRoot().builder(); - NodeBuilder content = builder.child(child); - content.setProperty("ts", System.currentTimeMillis()); - - byte[] data = new byte[size]; - new Random().nextBytes(data); - Blob blob = store.createBlob(new ByteArrayInputStream(data)); - content.setProperty("testBlob", blob); - - for (int i = 0; i < dataNodes; i++) { - NodeBuilder c = content.child("c" + i); - for (int j = 0; j < 1000; j++) { - c.setProperty("p" + i, "v" + i); - } - } - - store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return data; - } - - /** - * OAK-2430 - */ - @Test - public void testSyncLoop() throws Exception { - final int blobSize = 25 * 1024; - final int dataNodes = 5000; - - FileStore primary = getPrimary(); - FileStore secondary = getSecondary(); - - NodeStore store = SegmentNodeStore.builder(primary).build(); - final StandbyServer server = new StandbyServer(port, primary); - server.start(); - byte[] data = addTestContent(store, "server", blobSize, dataNodes); - primary.flush(); - 
- StandbyClient cl = newStandbyClient(secondary); - - try { - - for (int i = 0; i < 5; i++) { - String cp = store.checkpoint(Long.MAX_VALUE); - primary.flush(); - cl.run(); - assertEquals(primary.getHead(), secondary.getHead()); - assertTrue(store.release(cp)); - cl.cleanup(); - assertTrue(secondary.size() > blobSize); - } - - } finally { - server.close(); - cl.close(); - } - - assertTrue(primary.size() > blobSize); - assertTrue(secondary.size() > blobSize); - - long primaryFs = FileUtils.sizeOf(directoryS); - long secondaryFs = FileUtils.sizeOf(directoryC); - assertTrue(secondaryFs < primaryFs * 1.15); - - PropertyState ps = secondary.getHead().getChildNode("root") - .getChildNode("server").getProperty("testBlob"); - assertNotNull(ps); - assertEquals(Type.BINARY.tag(), ps.getType().tag()); - Blob b = ps.getValue(Type.BINARY); - assertEquals(blobSize, b.length()); - - byte[] testData = new byte[blobSize]; - ByteStreams.readFully(b.getNewStream(), testData); - assertArrayEquals(data, testData); - - } - -} diff --git oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/TestBase.java oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/TestBase.java deleted file mode 100644 index 338583c..0000000 --- oak-tarmk-standby/src/test/java/org/apache/jackrabbit/oak/plugins/segment/standby/TestBase.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
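The OAK-2430 loop above leans on one NodeStore contract worth isolating: checkpoint(long) pins a revision, retrieve(String) returns that pinned root regardless of later merges, and release(String) unpins it (returning true) so a subsequent cleanup can reclaim the segments. A minimal sketch of that lifecycle on a MemoryNodeStore:

    import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
    import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
    import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
    import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
    import org.apache.jackrabbit.oak.spi.state.NodeState;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    public class CheckpointSketch {

        public static void main(String[] args) throws Exception {
            NodeStore store = new MemoryNodeStore();

            String cp = store.checkpoint(Long.MAX_VALUE); // pin the current root "forever"

            NodeBuilder b = store.getRoot().builder();
            b.child("after-checkpoint");
            store.merge(b, EmptyHook.INSTANCE, CommitInfo.EMPTY);

            NodeState pinned = store.retrieve(cp);        // still the pre-merge root
            System.out.println("pinned sees new child: "
                    + pinned.hasChildNode("after-checkpoint")); // false

            System.out.println("released: " + store.release(cp)); // true, unpins
        }
    }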
- */ -package org.apache.jackrabbit.oak.plugins.segment.standby; - -import static org.apache.jackrabbit.oak.commons.CIHelper.jenkins; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture.SEGMENT_TAR; -import static org.apache.jackrabbit.oak.commons.FixturesHelper.getFixtures; -import static org.apache.jackrabbit.oak.plugins.segment.SegmentTestUtils.createTmpTargetDir; -import static org.junit.Assume.assumeFalse; -import static org.junit.Assume.assumeTrue; - -import java.io.File; -import java.io.IOException; -import java.util.Set; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.SystemUtils; -import org.apache.jackrabbit.oak.commons.CIHelper; -import org.apache.jackrabbit.oak.commons.FixturesHelper; -import org.apache.jackrabbit.oak.commons.FixturesHelper.Fixture; -import org.apache.jackrabbit.oak.plugins.segment.file.FileStore; -import org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient; -import org.junit.BeforeClass; - -public class TestBase { - - static final int port = Integer.getInteger("standby.server.port", - 52800); - - static final int proxyPort = Integer.getInteger( - "standby.proxy.port", 51913); - - final static String LOCALHOST = "127.0.0.1"; - - static final int timeout = Integer.getInteger("standby.test.timeout", 500); - - private static final Set FIXTURES = FixturesHelper.getFixtures(); - - File directoryS; - FileStore storeS; - - File directoryC; - FileStore storeC; - - File directoryC2; - FileStore storeC2; - - @BeforeClass - public static void assumeIsNotJenkins() { - assumeFalse(jenkins()); - } - - /* - Java 6 on Windows doesn't support dual IP stacks, so we will skip our IPv6 - tests. - */ - protected final boolean noDualStackSupport = SystemUtils.IS_OS_WINDOWS && SystemUtils.IS_JAVA_1_6; - - @BeforeClass - public static void assumptions() { - assumeTrue(!CIHelper.travis()); - assumeTrue(FIXTURES.contains(Fixture.SEGMENT_MK) || getFixtures().contains(SEGMENT_TAR)); - } - - public void setUpServerAndClient() throws Exception { - // server - directoryS = createTmpTargetDir(getClass().getSimpleName()+"-Server"); - storeS = setupPrimary(directoryS); - - // client - directoryC = createTmpTargetDir(getClass().getSimpleName()+"-Client"); - storeC = setupSecondary(directoryC); - } - - private static FileStore newFileStore(File directory) throws Exception { - return FileStore.builder(directory) - .withMaxFileSize(1) - .withMemoryMapping(false) - .withNoCache() - .build(); - } - - protected FileStore setupPrimary(File directory) throws Exception { - return newFileStore(directory); - } - - protected FileStore getPrimary() { - return storeS; - } - - protected FileStore setupSecondary(File directory) throws Exception { - return newFileStore(directoryC); - } - - protected FileStore getSecondary() { - return storeC; - } - - public void setUpServerAndTwoClients() throws Exception { - setUpServerAndClient(); - - directoryC2 = createTmpTargetDir(getClass().getSimpleName()+"-Client2"); - storeC2 = newFileStore(directoryC2); - } - - public void closeServerAndClient() { - storeS.close(); - storeC.close(); - try { - FileUtils.deleteDirectory(directoryS); - FileUtils.deleteDirectory(directoryC); - } catch (IOException e) { - } - } - - public void closeServerAndTwoClients() { - closeServerAndClient(); - storeC2.close(); - try { - FileUtils.deleteDirectory(directoryC2); - } catch (IOException e) { - } - } - - public static int getTestTimeout() { - return timeout; - } - - public StandbyClient newStandbyClient(FileStore store) 
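Two latent defects in the deleted TestBase are worth flagging for whoever ports these tests: setupSecondary(File directory) ignores its parameter and reuses the directoryC field (harmless only because its sole caller passes exactly that field), and the close methods swallow cleanup IOExceptions. The oak-segment-tar counterpart of newFileStore would look roughly like the sketch below; fileStoreBuilder, withMaxFileSize and withMemoryMapping are the 1.6-era replacements for FileStore.builder, while withSegmentCacheSize(0) is an assumed stand-in for the removed withNoCache().

    import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;

    import java.io.File;

    import org.apache.jackrabbit.oak.segment.file.FileStore;

    public class StoreFixtureSketch {

        // Mirrors the deleted newFileStore(File), on oak-segment-tar, and
        // actually honoring the directory argument it is given.
        static FileStore newFileStore(File directory) throws Exception {
            return fileStoreBuilder(directory)
                    .withMaxFileSize(1)       // 1 MB tar files, as in the old fixture
                    .withMemoryMapping(false)
                    .withSegmentCacheSize(0)  // assumed stand-in for withNoCache()
                    .build();
        }

        public static void main(String[] args) throws Exception {
            File dir = new File("target/store-fixture");
            dir.mkdirs();
            try (FileStore store = newFileStore(dir)) {
                System.out.println("file store opened at " + dir);
            }
        }
    }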
throws Exception { - return newStandbyClient(store, port, false); - } - - public StandbyClient newStandbyClient(FileStore store, int port) - throws Exception { - return newStandbyClient(store, port, false); - } - - public StandbyClient newStandbyClient(FileStore store, int port, - boolean secure) throws Exception { - return new StandbyClient(LOCALHOST, port, store, secure, timeout, false); - } - -} diff --git oak-tarmk-standby/src/test/resources/logback-test.xml oak-tarmk-standby/src/test/resources/logback-test.xml deleted file mode 100644 index 234aa88..0000000 --- oak-tarmk-standby/src/test/resources/logback-test.xml +++ /dev/null @@ -1,39 +0,0 @@ [39 deleted lines of logback-test.xml; the XML markup was lost in extraction. Recoverable content: a console appender and a file appender writing to target/unit-tests.log, both using the pattern %date{HH:mm:ss.SSS} %-5level %-40([%thread] %F:%L) %msg%n] diff --git pom.xml pom.xml index e85de82..f0e4ed7 100644 --- pom.xml +++ pom.xml @@ -49,7 +49,6 @@ oak-solr-osgi oak-auth-external oak-auth-ldap - oak-tarmk-standby oak-run oak-it-osgi oak-pojosr @@ -58,7 +57,6 @@ oak-exercise oak-examples oak-it - oak-segment oak-segment-tar
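Finally, the reactor pom drops both oak-tarmk-standby and oak-segment while keeping oak-segment-tar, so nothing in the build references the deleted sources. If a mechanical guard against the modules creeping back is wanted, the module list is easy to assert with JDK-only XML parsing; the pom.xml path below assumes execution from the repository root.

    import java.io.File;
    import java.util.ArrayList;
    import java.util.List;

    import javax.xml.parsers.DocumentBuilderFactory;

    import org.w3c.dom.Document;
    import org.w3c.dom.NodeList;

    public class ReactorModulesCheck {

        public static void main(String[] args) throws Exception {
            // Assumes execution from the repository root, next to the reactor pom.
            Document pom = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder()
                    .parse(new File("pom.xml"));

            List<String> modules = new ArrayList<>();
            NodeList nodes = pom.getElementsByTagName("module");
            for (int i = 0; i < nodes.getLength(); i++) {
                modules.add(nodes.item(i).getTextContent().trim());
            }

            System.out.println("oak-tarmk-standby gone: " + !modules.contains("oak-tarmk-standby"));
            System.out.println("oak-segment gone:       " + !modules.contains("oak-segment"));
            System.out.println("oak-segment-tar kept:   " + modules.contains("oak-segment-tar"));
        }
    }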