Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterView.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterView.java (revision 1730495) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterView.java (working copy) @@ -128,7 +128,7 @@ * background read for and thus still have a backlog * @return the ClusterView representing the provided info */ - static ClusterView fromDocument(int localInstanceId, ClusterViewDocument clusterViewDoc, Set backlogIds) { + static ClusterView fromDocument(int localInstanceId, String clusterId, ClusterViewDocument clusterViewDoc, Set backlogIds) { Set activeIds = clusterViewDoc.getActiveIds(); Set deactivatingIds = new HashSet(); deactivatingIds.addAll(clusterViewDoc.getRecoveringIds()); @@ -142,7 +142,11 @@ throw new IllegalStateException( "not all backlogIds (" + backlogIds + ") are part of inactiveIds (" + clusterViewDoc.getInactiveIds() + ")"); } - return new ClusterView(clusterViewDoc.getViewSeqNum(), backlogIds.size() == 0, clusterViewDoc.getClusterViewId(), + // clusterViewDoc.getClusterViewId() used to provide the 'clusterViewId' + // as defined within the settings collection of the DocumentStore. + // with OAK-4006 however we're changing this to use one clusterId + // within oak - provided and controlled by ClusterRepositoryInfo. + return new ClusterView(clusterViewDoc.getViewSeqNum(), backlogIds.size() == 0, clusterId, localInstanceId, activeIds, deactivatingIds, inactiveIds); } @@ -180,7 +184,7 @@ builder.object(); builder.key("seq").value(viewSeqNum); builder.key("final").value(viewFinal); -// builder.key("id").value(clusterViewId); + builder.key("id").value(clusterViewId); builder.key("me").value(localId); builder.key("active").array(); for (Iterator it = activeIds.iterator(); it.hasNext();) { Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocument.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocument.java (revision 1730495) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocument.java (working copy) @@ -28,7 +28,6 @@ import java.util.Iterator; import java.util.Map; import java.util.Set; -import java.util.UUID; import org.apache.jackrabbit.oak.commons.json.JsopBuilder; import org.slf4j.Logger; @@ -68,14 +67,6 @@ // keys that we store in the root document - and in the history /** - * document key that stores the stable id of the cluster (will never change) - * (Note: a better term would have been just clusterId - but that one is - * already occupied with what should actually be called clusterNodeId or - * just nodeId) - **/ - static final String CLUSTER_VIEW_ID_KEY = "clusterViewId"; - - /** * document key that stores the monotonically incrementing sequence number * of the cluster view. 
Any update will increase this by 1 **/ @@ -138,9 +129,6 @@ /** the monotonically incrementing sequence number of this cluster view **/ private final long viewSeqNum; - /** the stable id of this cluster **/ - private final String clusterViewId; - /** the ids of instances that are active at this moment **/ private final Integer[] activeIds; @@ -234,12 +222,9 @@ newViewSeqNum = 1L; updateOp.setNew(true); // paranoia as that's already set above updateOp.set(VIEW_SEQ_NUM_KEY, newViewSeqNum); - // first view ever => choose a new unique clusterViewId - String clusterViewId = UUID.randomUUID().toString(); - updateOp.set(CLUSTER_VIEW_ID_KEY, clusterViewId); updateOps.add(updateOp); - logger.debug("updateAndRead: trying to create the first ever clusterView - hence {}={} and {}={}", VIEW_SEQ_NUM_KEY, - newViewSeqNum, CLUSTER_VIEW_ID_KEY, clusterViewId); + logger.debug("updateAndRead: trying to create the first ever clusterView - hence {}={}", VIEW_SEQ_NUM_KEY, + newViewSeqNum); if (!documentNodeStore.getDocumentStore().create(Collection.SETTINGS, updateOps)) { logger.debug("updateAndRead: someone else just created the first view ever while I tried - reread that one later"); return null; @@ -446,7 +431,6 @@ if (doc == null) { throw new IllegalArgumentException("doc must not be null"); } - this.clusterViewId = (String) doc.get(CLUSTER_VIEW_ID_KEY); this.viewSeqNum = (Long) doc.get(VIEW_SEQ_NUM_KEY); this.createdAt = (String) doc.get(CREATED_KEY); Object creatorId = doc.get(CREATOR_KEY); @@ -513,7 +497,7 @@ @Override public String toString() { - return "a ClusterView[valid=" + isValid() + ", viewSeqNum=" + viewSeqNum + ", clusterViewId=" + clusterViewId + return "a ClusterView[valid=" + isValid() + ", viewSeqNum=" + viewSeqNum + ", activeIds=" + arrayToCsv(activeIds) + ", recoveringIds=" + arrayToCsv(recoveringIds) + ", inactiveIds=" + arrayToCsv(inactiveIds) + "]"; } @@ -543,14 +527,6 @@ return viewSeqNum; } - /** - * Returns a UUID representing this cluster - will never change, propagates - * from view to view - **/ - String getClusterViewId() { - return clusterViewId; - } - private boolean matches(Set activeIds, Set recoveringIds, Set inactiveIds) { if (!matches(this.activeIds, activeIds)) { return false; Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentDiscoveryLiteService.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentDiscoveryLiteService.java (revision 1730495) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentDiscoveryLiteService.java (working copy) @@ -39,10 +39,10 @@ import org.apache.jackrabbit.commons.SimpleValueFactory; import org.apache.jackrabbit.oak.api.Descriptors; import org.apache.jackrabbit.oak.osgi.OsgiWhiteboard; +import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; import org.apache.jackrabbit.oak.spi.commit.Observer; import org.apache.jackrabbit.oak.spi.state.NodeState; -import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.osgi.framework.Version; import org.osgi.service.component.ComponentContext; import org.slf4j.Logger; @@ -432,7 +432,8 @@ } if (changed) { - ClusterView v = ClusterView.fromDocument(clusterNodeId, newView, backlogNodes.keySet()); + String clusterId = ClusterRepositoryInfo.getOrCreateId(documentNodeStore); + ClusterView v = ClusterView.fromDocument(clusterNodeId, clusterId, newView, backlogNodes.keySet()); 
ClusterView previousView = previousClusterView; previousClusterView = v; hasInstancesWithBacklog = newHasInstancesWithBacklog; Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java (revision 1730495) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java (working copy) @@ -34,6 +34,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Dictionary; import java.util.HashMap; import java.util.Hashtable; @@ -55,6 +56,8 @@ import org.apache.felix.scr.annotations.Reference; import org.apache.felix.scr.annotations.ReferenceCardinality; import org.apache.felix.scr.annotations.ReferencePolicy; +import org.apache.jackrabbit.commons.SimpleValueFactory; +import org.apache.jackrabbit.oak.api.Descriptors; import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean; import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean; import org.apache.jackrabbit.oak.cache.CacheStats; @@ -82,6 +85,7 @@ import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardExecutor; import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils; import org.apache.jackrabbit.oak.stats.StatisticsProvider; +import org.apache.jackrabbit.oak.util.GenericDescriptors; import org.osgi.framework.Bundle; import org.osgi.framework.BundleException; import org.osgi.framework.Constants; @@ -333,13 +337,6 @@ public static final String PROP_DS_TYPE = "documentStoreType"; private DocumentStoreType documentStoreType; - private static final String DEFAULT_SHARED_DS_REPO_ID = ""; - @Property(value = DEFAULT_SHARED_DS_REPO_ID, - label = "SharedDataStore RepositoryID", - description = "Custom RepositoryID for SharedDataStore. This overrides any generated value." 
- ) - public static final String PROP_SHARED_DS_REPO_ID = "sharedDSRepoId"; - @Reference private StatisticsProvider statisticsProvider; @@ -477,12 +474,18 @@ mkBuilder.setExecutor(executor); mk = mkBuilder.open(); + // ensure a clusterId is initialized + // and expose it as 'oak.clusterid' repository descriptor + GenericDescriptors clusterIdDesc = new GenericDescriptors(); + clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, + new SimpleValueFactory().createValue( + ClusterRepositoryInfo.getOrCreateId(mk.getNodeStore())), true, false); + whiteboard.register(Descriptors.class, clusterIdDesc, Collections.emptyMap()); + // If a shared data store register the repo id in the data store if (SharedDataStoreUtils.isShared(blobStore)) { - String customRepoID = PropertiesUtil.toString(prop(PROP_SHARED_DS_REPO_ID), DEFAULT_SHARED_DS_REPO_ID); - try { - String repoId = ClusterRepositoryInfo.createId(mk.getNodeStore(), customRepoID); + String repoId = ClusterRepositoryInfo.getOrCreateId(mk.getNodeStore()); ((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedDataStoreUtils.SharedStoreRecordType.REPOSITORY.getNameFromId(repoId)); } catch (Exception e) { @@ -689,7 +692,7 @@ if (store.getBlobStore() instanceof GarbageCollectableBlobStore) { BlobGarbageCollector gc = store.createBlobGarbageCollector(blobGcMaxAgeInSecs, - ClusterRepositoryInfo.getId(mk.getNodeStore())); + ClusterRepositoryInfo.getOrCreateId(mk.getNodeStore())); registrations.add(registerMBean(whiteboard, BlobGCMBean.class, new BlobGC(gc, executor), BlobGCMBean.TYPE, "Document node store blob garbage collection")); } Index: oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java =================================================================== --- oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java (revision 1730495) +++ oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java (working copy) @@ -18,7 +18,8 @@ import java.util.UUID; -import com.google.common.base.Strings; +import javax.annotation.CheckForNull; + import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.Type; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; @@ -26,69 +27,75 @@ import org.apache.jackrabbit.oak.spi.state.NodeBuilder; import org.apache.jackrabbit.oak.spi.state.NodeState; import org.apache.jackrabbit.oak.spi.state.NodeStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import javax.annotation.CheckForNull; - /** * Utility class to manage a unique cluster/repository id for the cluster. */ public class ClusterRepositoryInfo { + private static final Logger log = LoggerFactory.getLogger(ClusterRepositoryInfo.class); + public static final String OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY = "oak.clusterid"; public static final String CLUSTER_CONFIG_NODE = ":clusterConfig"; public static final String CLUSTER_ID_PROP = ":clusterId"; /** - * Adds a new uuid for the repository in the property /:clusterConfig/:clusterId if not available - * + * Gets the {@link #CLUSTER_ID_PROP} if available; if it is not set yet, + * creates it and returns the newly created one (or, if another cluster + * instance won a concurrent creation, returns that one) + *

+ * Note that this method doesn't require synchronization as + * concurrent execution within the VM would be covered + * within NodeStore's merge and result in a conflict for + * one of the two threads - in which case the loser would + * re-read and find the clusterId set. + * + * @param store the NodeStore instance - * @return the repository id - * @throws CommitFailedException + * @return the persistent clusterId */ - public static String createId(NodeStore store) throws CommitFailedException { - return createId(store, null); - } + @CheckForNull + public static String getOrCreateId(NodeStore store) { + // first try to read an existing clusterId + NodeState root = store.getRoot(); + NodeState node = root.getChildNode(CLUSTER_CONFIG_NODE); + if (node.exists() && node.hasProperty(CLUSTER_ID_PROP)) { + // clusterId is set - this is the normal case + return node.getProperty(CLUSTER_ID_PROP).getValue(Type.STRING); + } + + // otherwise either the config node or the property doesn't exist. + // then try to create it - but since this could be executed concurrently + // in a cluster, it might result in a conflict. in that case, re-read + // the node + NodeBuilder builder = root.builder(); + + // choose a new random clusterId + String newRandomClusterId = UUID.randomUUID().toString(); + builder.child(CLUSTER_CONFIG_NODE).setProperty(CLUSTER_ID_PROP, newRandomClusterId); + try { + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); - /** - * Adds a new uuid for the repository in the property /:clusterConfig/:clusterId Or - * update the id with the customId passed. - * - * @param store the NodeStore instance - * @param customId customId - * @return the repository id - * @throws CommitFailedException - */ - public static String createId(NodeStore store, String customId) throws CommitFailedException { - NodeBuilder root = store.getRoot().builder(); - if (!root.hasChildNode(CLUSTER_CONFIG_NODE) || - !root.getChildNode(CLUSTER_CONFIG_NODE).hasProperty(CLUSTER_ID_PROP)) { - // Set the customId if available - String id = (!Strings.isNullOrEmpty(customId) ? customId : UUID.randomUUID().toString()); - root.child(CLUSTER_CONFIG_NODE).setProperty(CLUSTER_ID_PROP, id); - store.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - return id; - } else { - String currId = root.getChildNode(CLUSTER_CONFIG_NODE).getProperty(CLUSTER_ID_PROP).getValue(Type.STRING); - if (!Strings.isNullOrEmpty(customId) && !customId.equals(currId)) { - root.child(CLUSTER_CONFIG_NODE).setProperty(CLUSTER_ID_PROP, customId); - store.merge(root, EmptyHook.INSTANCE, CommitInfo.EMPTY); - currId = customId; + // great, we were able to create it, all good. + log.info("getOrCreateId: created a new clusterId=" + newRandomClusterId); + return newRandomClusterId; + } catch (CommitFailedException e) { + // if we can't commit, then another instance in the cluster + // set the clusterId concurrently.
in which case we should now + // see that one + root = store.getRoot(); + node = root.getChildNode(CLUSTER_CONFIG_NODE); + if (node.exists() && node.hasProperty(CLUSTER_ID_PROP)) { + // clusterId is set - this is the normal case + return node.getProperty(CLUSTER_ID_PROP).getValue(Type.STRING); + } - return currId; + + // this should not happen + String path = "/" + CLUSTER_CONFIG_NODE + "/" + CLUSTER_ID_PROP; + log.error("getOrCreateId: both setting and then reading of " + path + " failed"); + throw new IllegalStateException("Both setting and then reading of " + path + " failed"); } } - /** - * Retrieves the {# CLUSTER_ID_PROP} - * - * @param store the NodeStore instance - * @return the repository id - */ - @CheckForNull - public static String getId(NodeStore store) { - NodeState state = store.getRoot().getChildNode(CLUSTER_CONFIG_NODE); - if (state.hasProperty(CLUSTER_ID_PROP)) { - return state.getProperty(CLUSTER_ID_PROP).getValue(Type.STRING); - } - return null; - } } Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/blob/ClusterRepositoryInfoTest.java (working copy) @@ -17,23 +17,30 @@ package org.apache.jackrabbit.oak.plugins.blob; import static org.hamcrest.CoreMatchers.instanceOf; -import junit.framework.Assert; +import java.io.File; +import java.io.IOException; + import org.apache.commons.io.FileUtils; +import org.apache.jackrabbit.oak.api.CommitFailedException; +import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; import org.apache.jackrabbit.oak.plugins.document.DocumentMKBuilderProvider; import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; -import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreUtils; import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore; import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.commit.CommitInfo; +import org.apache.jackrabbit.oak.spi.commit.EmptyHook; +import org.apache.jackrabbit.oak.spi.state.NodeBuilder; +import org.apache.jackrabbit.oak.spi.state.NodeState; +import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.junit.After; import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; -import java.io.File; -import java.io.IOException; +import junit.framework.Assert; /** * Tests the ClusterRepositoryInfo unique cluster repository id.
@@ -44,6 +51,19 @@ @Rule public DocumentMKBuilderProvider builderProvider = new DocumentMKBuilderProvider(); + /** + * test method to change the clusterId explicitly + * + * @throws CommitFailedException + **/ + private static void setId(NodeStore store, String clusterId) throws CommitFailedException { + NodeState root = store.getRoot(); + NodeBuilder builder = root.builder(); + builder.child(ClusterRepositoryInfo.CLUSTER_CONFIG_NODE) + .setProperty(ClusterRepositoryInfo.CLUSTER_ID_PROP, clusterId); + store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY); + } + @BeforeClass public static void setup() { try { @@ -61,14 +81,14 @@ .setDocumentStore(new MemoryDocumentStore()) .setBlobStore(blobStore) .getNodeStore(); - String repoId1 = ClusterRepositoryInfo.createId(ds1); + String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1); DocumentNodeStore ds2 = builderProvider.newBuilder() .setAsyncDelay(0) .setDocumentStore(new MemoryDocumentStore()) .setBlobStore(blobStore) .getNodeStore(); - String repoId2 = ClusterRepositoryInfo.createId(ds2); + String repoId2 = ClusterRepositoryInfo.getOrCreateId(ds2); Assert.assertNotSame(repoId1, repoId2); } @@ -82,7 +102,7 @@ .setClusterId(1) .setBlobStore(blobStore) .getNodeStore(); - String repoId1 = ClusterRepositoryInfo.createId(ds1); + String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1); ds1.runBackgroundOperations(); DocumentNodeStore ds2 = builderProvider.newBuilder() @@ -91,24 +111,26 @@ .setClusterId(2) .setBlobStore(blobStore) .getNodeStore(); - String repoId2 = ClusterRepositoryInfo.createId(ds2); + String repoId2 = ClusterRepositoryInfo.getOrCreateId(ds2); // Since the same cluster the ids should be equal Assert.assertEquals(repoId1, repoId2); } - @Test - public void checkGetIdWhenNotRegistered() { - MemoryDocumentStore store = new MemoryDocumentStore(); - DocumentNodeStore ds1 = builderProvider.newBuilder() - .setAsyncDelay(0) - .setDocumentStore(store) - .setClusterId(1) - .getNodeStore(); - // Should be null and no NPE - String id = ClusterRepositoryInfo.getId(ds1); - Assert.assertNull(id); - } + // below test doesn't make sense anymore in the context + // of getOrCreateId (OAK-4006) where that never returns null +// @Test +// public void checkGetIdWhenNotRegistered() { +// MemoryDocumentStore store = new MemoryDocumentStore(); +// DocumentNodeStore ds1 = builderProvider.newBuilder() +// .setAsyncDelay(0) +// .setDocumentStore(store) +// .setClusterId(1) +// .getNodeStore(); +// // Should be null and no NPE +// String id = ClusterRepositoryInfo.getOrCreateId(ds1); +// Assert.assertNull(id); +// } @Test public void checkCustomId() throws Exception { @@ -118,11 +140,12 @@ .setDocumentStore(store) .setClusterId(1) .getNodeStore(); - String repoId1 = ClusterRepositoryInfo.createId(ds1, "yyyyyyy"); + String repoId1 = "yyyyyyy"; + setId(ds1, repoId1); ds1.runBackgroundOperations(); - String id = ClusterRepositoryInfo.getId(ds1); - Assert.assertEquals(id, "yyyyyyy"); + String id = ClusterRepositoryInfo.getOrCreateId(ds1); + Assert.assertEquals(id, repoId1); } @Test @@ -133,13 +156,13 @@ .setDocumentStore(store) .setClusterId(1) .getNodeStore(); - String repoId1 = ClusterRepositoryInfo.createId(ds1, null); + String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1); ds1.runBackgroundOperations(); // Change with a custom ID - ClusterRepositoryInfo.createId(ds1, "xxxxxxxx"); + setId(ds1, "xxxxxxxx"); - String id = ClusterRepositoryInfo.getId(ds1); + String id = ClusterRepositoryInfo.getOrCreateId(ds1); Assert.assertNotNull(id); 
Assert.assertEquals(id, "xxxxxxxx"); } Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewBuilder.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewBuilder.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewBuilder.java (working copy) @@ -30,12 +30,10 @@ private final Set backlogIds = new HashSet(); private final Set inactiveIds = new HashSet(); private final long viewSeqNum; - private final String clusterViewId; private final int myId; - ClusterViewBuilder(long viewSeqNum, String clusterViewId, int myId) { + ClusterViewBuilder(long viewSeqNum, int myId) { this.viewSeqNum = viewSeqNum; - this.clusterViewId = clusterViewId; this.myId = myId; } @@ -85,13 +83,12 @@ doc.put(ClusterViewDocument.INACTIVE_KEY, asArrayStr(inactiveIds)); doc.put(ClusterViewDocument.RECOVERING_KEY, asArrayStr(recoveringIds)); doc.put(ClusterViewDocument.ACTIVE_KEY, asArrayStr(activeIds)); - doc.put(ClusterViewDocument.CLUSTER_VIEW_ID_KEY, clusterViewId); ClusterViewDocument clusterViewDoc = new ClusterViewDocument(doc); return clusterViewDoc; } - public ClusterView asView() { - return ClusterView.fromDocument(myId, asDoc(), backlogIds); + public ClusterView asView(String clusterId) { + return ClusterView.fromDocument(myId, clusterId, asDoc(), backlogIds); } private String asArrayStr(Set ids) { Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocumentTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocumentTest.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewDocumentTest.java (working copy) @@ -79,18 +79,16 @@ @Test public void testConstructor() { - ClusterViewDocument doc = new ClusterViewBuilder(1, "2", 3).active(3, 4).asDoc(); + ClusterViewDocument doc = new ClusterViewBuilder(1, 3).active(3, 4).asDoc(); assertNotNull(doc); - assertEquals("2", doc.getClusterViewId()); assertEquals(0, doc.getRecoveringIds().size()); assertEquals(0, doc.getInactiveIds().size()); assertEquals(2, doc.getActiveIds().size()); assertTrue(doc.getActiveIds().contains(3)); assertTrue(doc.getActiveIds().contains(4)); - doc = new ClusterViewBuilder(1, "2", 3).active(3, 4).backlogs(5).inactive(5, 6).asDoc(); + doc = new ClusterViewBuilder(1, 3).active(3, 4).backlogs(5).inactive(5, 6).asDoc(); assertNotNull(doc); - assertEquals("2", doc.getClusterViewId()); assertEquals(0, doc.getRecoveringIds().size()); assertEquals(2, doc.getInactiveIds().size()); assertEquals(2, doc.getActiveIds().size()); @@ -99,10 +97,9 @@ assertTrue(doc.getInactiveIds().contains(5)); assertTrue(doc.getInactiveIds().contains(6)); - doc = new ClusterViewBuilder(11, "x", 4).active(3, 4, 5).recovering(6).inactive(7, 8).asDoc(); + doc = new ClusterViewBuilder(11, 4).active(3, 4, 5).recovering(6).inactive(7, 8).asDoc(); assertNotNull(doc); assertEquals(11, doc.getViewSeqNum()); - assertEquals("x", doc.getClusterViewId()); assertEquals(1, doc.getRecoveringIds().size()); assertEquals(2, doc.getInactiveIds().size()); assertEquals(3, doc.getActiveIds().size()); @@ -165,8 +162,6 @@ Set inactiveIds = null; // first ever view: ClusterViewDocument doc = ClusterViewDocument.readOrUpdate(ns, activeIds, recoveringIds, inactiveIds); - final String id = doc.getClusterViewId(); - assertTrue(id != null && 
id.length() > 0); String createdAt = doc.getCreatedAt(); assertTrue(createdAt != null && createdAt.length() > 0); long createdBy = doc.getCreatedBy(); @@ -185,7 +180,6 @@ // and now add a new active id activeIds.add(3); doc = ClusterViewDocument.readOrUpdate(ns, activeIds, recoveringIds, inactiveIds); - assertEquals(id, doc.getClusterViewId()); createdAt = doc.getCreatedAt(); assertTrue(createdAt != null && createdAt.length() > 0); createdBy = doc.getCreatedBy(); @@ -206,7 +200,6 @@ recoveringIds = new HashSet(); recoveringIds.add(4); doc = ClusterViewDocument.readOrUpdate(ns, activeIds, recoveringIds, inactiveIds); - assertEquals(id, doc.getClusterViewId()); createdAt = doc.getCreatedAt(); assertTrue(createdAt != null && createdAt.length() > 0); createdBy = doc.getCreatedBy(); @@ -229,7 +222,6 @@ inactiveIds = new HashSet(); inactiveIds.add(4); doc = ClusterViewDocument.readOrUpdate(ns, activeIds, recoveringIds, inactiveIds); - assertEquals(id, doc.getClusterViewId()); createdAt = doc.getCreatedAt(); assertTrue(createdAt != null && createdAt.length() > 0); createdBy = doc.getCreatedBy(); Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewTest.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/ClusterViewTest.java (working copy) @@ -112,15 +112,15 @@ @Test public void testOneActiveOnly() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 21); - ClusterView view = builder.active(21).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 21); + ClusterView view = builder.active(21).asView(clusterId); // {"seq":10,"id":"35f60ed3-508d-4a81-b812-89f07f57db20","me":2,"active":[2],"deactivating":[],"inactive":[3]} JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("21", props.get("me")); assertEquals(asJsonArray(21), props.get("active")); assertEquals(asJsonArray(), props.get("deactivating")); @@ -129,15 +129,15 @@ @Test public void testOneActiveOneInactive() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 2); - ClusterView view = builder.active(2).inactive(3).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); + ClusterView view = builder.active(2).inactive(3).asView(clusterId); // {"seq":10,"id":"35f60ed3-508d-4a81-b812-89f07f57db20","me":2,"active":[2],"deactivating":[],"inactive":[3]} JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("2", props.get("me")); assertEquals(asJsonArray(2), props.get("active")); assertEquals(asJsonArray(), props.get("deactivating")); @@ -146,16 +146,16 @@ @Test public void testSeveralActiveOneInactive() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, 
clusterViewId, 2); - ClusterView view = builder.active(2, 5, 6).inactive(3).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); + ClusterView view = builder.active(2, 5, 6).inactive(3).asView(clusterId); // {"seq":10,"id":"35f60ed3-508d-4a81-b812-89f07f57db20","me":2,"active":[2],"deactivating":[],"inactive":[3]} JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); assertEquals("true", props.get("final")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("2", props.get("me")); assertEquals(asJsonArray(2, 5, 6), props.get("active")); assertEquals(asJsonArray(), props.get("deactivating")); @@ -164,16 +164,16 @@ @Test public void testOneActiveSeveralInactive() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 2); - ClusterView view = builder.active(2).inactive(3, 4, 5, 6).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); + ClusterView view = builder.active(2).inactive(3, 4, 5, 6).asView(clusterId); // {"seq":10,"id":"35f60ed3-508d-4a81-b812-89f07f57db20","me":2,"active":[2],"deactivating":[],"inactive":[3]} JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); assertEquals("true", props.get("final")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("2", props.get("me")); assertEquals(asJsonArray(2), props.get("active")); assertEquals(asJsonArray(), props.get("deactivating")); @@ -182,15 +182,15 @@ @Test public void testWithRecoveringOnly() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 2); - ClusterView view = builder.active(2, 3).recovering(4).inactive(5, 6).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); + ClusterView view = builder.active(2, 3).recovering(4).inactive(5, 6).asView(clusterId); JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); assertEquals("true", props.get("final")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("2", props.get("me")); assertEquals(asJsonArray(2, 3), props.get("active")); assertEquals(asJsonArray(4), props.get("deactivating")); @@ -199,14 +199,14 @@ @Test public void testWithRecoveringAndBacklog() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 2); - ClusterView view = builder.active(2, 3).recovering(4).inactive(5, 6).backlogs(5).asView(); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); + ClusterView view = builder.active(2, 3).recovering(4).inactive(5, 6).backlogs(5).asView(clusterId); JsonObject o = asJsonObject(view); Map props = o.getProperties(); assertEquals("10", props.get("seq")); -// assertEquals(clusterViewId, unwrapString(props.get("id"))); + assertEquals(clusterId, unwrapString(props.get("id"))); assertEquals("2", props.get("me")); assertEquals("false", 
props.get("final")); assertEquals(asJsonArray(2, 3), props.get("active")); @@ -216,10 +216,10 @@ @Test public void testBacklogButNotInactive() throws Exception { - String clusterViewId = UUID.randomUUID().toString(); - ClusterViewBuilder builder = new ClusterViewBuilder(10, clusterViewId, 2); + String clusterId = UUID.randomUUID().toString(); + ClusterViewBuilder builder = new ClusterViewBuilder(10, 2); try { - ClusterView view = builder.active(2, 3).backlogs(5).asView(); + ClusterView view = builder.active(2, 3).backlogs(5).asView(clusterId); fail("should complain"); } catch (Exception ok) { // ok Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/MongoBlobGCTest.java (working copy) @@ -261,7 +261,7 @@ DocumentNodeStore store = mk.getNodeStore(); String repoId = null; if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.createId(store); + repoId = ClusterRepositoryInfo.getOrCreateId(store); ((SharedDataStore) store.getBlobStore()).addMetadataRecord( new ByteArrayInputStream(new byte[0]), REPOSITORY.getNameFromId(repoId)); @@ -290,7 +290,7 @@ DocumentNodeStore store = mk.getNodeStore(); String repoId = null; if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.createId(store); + repoId = ClusterRepositoryInfo.getOrCreateId(store); ((SharedDataStore) store.getBlobStore()).addMetadataRecord( new ByteArrayInputStream(new byte[0]), REPOSITORY.getNameFromId(repoId)); Index: oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java =================================================================== --- oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java (revision 1730495) +++ oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/SharedBlobStoreGCTest.java (working copy) @@ -87,7 +87,7 @@ .setBlobStore(blobeStore1) .clock(clock) .getNodeStore(); - String repoId1 = ClusterRepositoryInfo.createId(ds1); + String repoId1 = ClusterRepositoryInfo.getOrCreateId(ds1); // Register the unique repository id in the data store ((SharedDataStore) blobeStore1).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(repoId1)); @@ -99,7 +99,7 @@ .setBlobStore(blobeStore2) .clock(clock) .getNodeStore(); - String repoId2 = ClusterRepositoryInfo.createId(ds2); + String repoId2 = ClusterRepositoryInfo.getOrCreateId(ds2); // Register the unique repository id in the data store ((SharedDataStore) blobeStore2).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(repoId2)); Index: oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteService.java =================================================================== --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteService.java (revision 1730495) +++ oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDiscoveryLiteService.java (working copy) @@ -32,6 +32,8 @@ import org.apache.jackrabbit.commons.SimpleValueFactory; import org.apache.jackrabbit.oak.api.Descriptors; import org.apache.jackrabbit.oak.osgi.OsgiWhiteboard; +import 
org.apache.jackrabbit.oak.plugins.document.DocumentDiscoveryLiteService; +import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo; import org.apache.jackrabbit.oak.spi.state.NodeStore; import org.osgi.service.component.ComponentContext; import org.slf4j.Logger; @@ -128,7 +130,10 @@ // to upper layers that we're not really in a cluster and that // this low level descriptor doesn't manage the 'cluster id' // in such a case. - return "{\"seq\":1,\"final\":true,\"me\":1,\"active\":[1],\"deactivating\":[],\"inactive\":[]}"; + // OAK-4006: but ClusterRepositoryInfo now provides a persistent clusterId, + // so that is now used also for discovery-lite via exactly below 'id' + String clusterId = ClusterRepositoryInfo.getOrCreateId(nodeStore); + return "{\"seq\":1,\"final\":true,\"me\":1,\"id\":\""+clusterId+"\",\"active\":[1],\"deactivating\":[],\"inactive\":[]}"; } /** Index: oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java =================================================================== --- oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java (revision 1730495) +++ oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStoreService.java (working copy) @@ -39,6 +39,7 @@ import java.io.Closeable; import java.io.File; import java.io.IOException; +import java.util.Collections; import java.util.Dictionary; import java.util.Hashtable; import java.util.concurrent.TimeUnit; @@ -53,6 +54,8 @@ import org.apache.felix.scr.annotations.Reference; import org.apache.felix.scr.annotations.ReferenceCardinality; import org.apache.felix.scr.annotations.ReferencePolicy; +import org.apache.jackrabbit.commons.SimpleValueFactory; +import org.apache.jackrabbit.oak.api.Descriptors; import org.apache.jackrabbit.oak.api.jmx.CacheStatsMBean; import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean; import org.apache.jackrabbit.oak.cache.CacheStats; @@ -89,6 +92,7 @@ import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardExecutor; import org.apache.jackrabbit.oak.stats.Clock; import org.apache.jackrabbit.oak.stats.StatisticsProvider; +import org.apache.jackrabbit.oak.util.GenericDescriptors; import org.osgi.framework.Constants; import org.osgi.framework.ServiceRegistration; import org.osgi.service.component.ComponentContext; @@ -285,13 +289,6 @@ ) public static final String PROP_BLOB_GC_MAX_AGE = "blobGcMaxAgeInSecs"; - private static final String DEFAULT_SHARED_DS_REPO_ID = ""; - @Property(value = DEFAULT_SHARED_DS_REPO_ID, - label = "SharedDataStore RepositoryID", - description = "Custom RepositoryID for SharedDataStore. 
This overrides any generated value" - ) - public static final String PROP_SHARED_DS_REPO_ID = "sharedDSRepoId"; - @Override protected SegmentNodeStore getNodeStore() { checkState(delegate != null, "service must be activated when used"); @@ -453,13 +450,19 @@ revisionGCRegistration = registerMBean(whiteboard, RevisionGCMBean.class, revisionGC, RevisionGCMBean.TYPE, "Segment node store revision garbage collection"); + // ensure a clusterId is initialized + // and expose it as 'oak.clusterid' repository descriptor + GenericDescriptors clusterIdDesc = new GenericDescriptors(); + clusterIdDesc.put(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY, + new SimpleValueFactory().createValue( + ClusterRepositoryInfo.getOrCreateId(delegate)), true, false); + whiteboard.register(Descriptors.class, clusterIdDesc, Collections.emptyMap()); + // If a shared data store register the repo id in the data store String repoId = ""; if (SharedDataStoreUtils.isShared(blobStore)) { - String customRepoID = property(PROP_SHARED_DS_REPO_ID); - try { - repoId = ClusterRepositoryInfo.createId(delegate, customRepoID); + repoId = ClusterRepositoryInfo.getOrCreateId(delegate); ((SharedDataStore) blobStore).addMetadataRecord(new ByteArrayInputStream(new byte[0]), SharedStoreRecordType.REPOSITORY.getNameFromId(repoId)); } catch (Exception e) { Index: oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java =================================================================== --- oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java (revision 1730495) +++ oak-segment/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentDataStoreBlobGCIT.java (working copy) @@ -296,7 +296,7 @@ ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10); String repoId = null; if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.createId(nodeStore); + repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore); ((SharedDataStore) store.getBlobStore()).addMetadataRecord( new ByteArrayInputStream(new byte[0]), REPOSITORY.getNameFromId(repoId)); @@ -349,7 +349,7 @@ private MarkSweepGarbageCollector init(long blobGcMaxAgeInSecs, ThreadPoolExecutor executor) throws Exception { String repoId = null; if (SharedDataStoreUtils.isShared(store.getBlobStore())) { - repoId = ClusterRepositoryInfo.createId(nodeStore); + repoId = ClusterRepositoryInfo.getOrCreateId(nodeStore); ((SharedDataStore) store.getBlobStore()).addMetadataRecord( new ByteArrayInputStream(new byte[0]), REPOSITORY.getNameFromId(repoId)); Index: oak-doc/src/site/markdown/osgi_config.md =================================================================== --- oak-doc/src/site/markdown/osgi_config.md (revision 1730495) +++ oak-doc/src/site/markdown/osgi_config.md (working copy) @@ -61,10 +61,6 @@ (currentTime - lastModifiedTime > blobGcMaxAgeInSecs). For example as per default only those blobs which have been created 24 hrs ago would be considered for GC -sharedDSRepoId (From Oak 1.2.11 & Oak 1.3.15) -: Default "" -: Custom SharedDataStore repositoryId. Used when custom blobstore configured. Should be unique among the repositories sharing the datastore. - #### DocumentNodeStore
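
For reference, a minimal usage sketch of the behaviour introduced by this patch - not part of the patch itself. It assumes an in-memory NodeStore (org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore) purely for illustration, and the class name ClusterIdExample is hypothetical:

    import org.apache.jackrabbit.oak.plugins.identifier.ClusterRepositoryInfo;
    import org.apache.jackrabbit.oak.plugins.memory.MemoryNodeStore;
    import org.apache.jackrabbit.oak.spi.state.NodeStore;

    public class ClusterIdExample {
        public static void main(String[] args) {
            // any NodeStore works; MemoryNodeStore keeps the sketch self-contained
            NodeStore store = new MemoryNodeStore();

            // the first call persists a random UUID under /:clusterConfig/:clusterId
            String first = ClusterRepositoryInfo.getOrCreateId(store);

            // later calls - including calls from other cluster instances sharing
            // the same store - return the same, stable clusterId
            String second = ClusterRepositoryInfo.getOrCreateId(store);
            assert first.equals(second);

            // once DocumentNodeStoreService/SegmentNodeStoreService have registered
            // the descriptor, JCR clients can read the same value via
            // Repository.getDescriptor(ClusterRepositoryInfo.OAK_CLUSTERID_REPOSITORY_DESCRIPTOR_KEY)
            System.out.println("oak.clusterid = " + first);
        }
    }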