Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/Configuration.java (working copy) @@ -24,13 +24,13 @@ import org.osgi.service.metatype.annotations.Option; import static org.apache.jackrabbit.oak.plugins.document.Configuration.PID; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_CACHE_SEGMENT_COUNT; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_CACHE_STACK_MOVE_DISTANCE; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_CHILDREN_CACHE_PERCENTAGE; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_DIFF_CACHE_PERCENTAGE; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_NODE_CACHE_PERCENTAGE; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_UPDATE_LIMIT; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_CACHE_SEGMENT_COUNT; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_CACHE_STACK_MOVE_DISTANCE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_CHILDREN_CACHE_PERCENTAGE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_DIFF_CACHE_PERCENTAGE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_NODE_CACHE_PERCENTAGE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_UPDATE_LIMIT; @ObjectClassDefinition( pid = {PID}, Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java (working copy) @@ -16,53 +16,21 @@ */ package org.apache.jackrabbit.oak.plugins.document; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Suppliers.memoize; import static com.google.common.base.Suppliers.ofInstance; import static org.apache.jackrabbit.oak.commons.PathUtils.concat; -import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreService.DEFAULT_JOURNAL_GC_MAX_AGE_MILLIS; -import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreServiceConfiguration.PROP_UPDATE_LIMIT; -import static org.apache.jackrabbit.oak.plugins.document.util.MongoConnection.readConcernLevel; import java.io.InputStream; -import java.net.UnknownHostException; -import java.util.EnumMap; -import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.concurrent.Executor; -import 
java.util.concurrent.TimeUnit; -import javax.annotation.CheckForNull; import javax.annotation.Nonnull; import javax.annotation.Nullable; import javax.sql.DataSource; -import com.google.common.base.Predicate; -import com.google.common.base.Predicates; -import com.google.common.base.Supplier; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalCause; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.cache.Weigher; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import com.google.common.util.concurrent.MoreExecutors; -import com.mongodb.DB; -import com.mongodb.MongoClientOptions; -import com.mongodb.ReadConcernLevel; import org.apache.jackrabbit.oak.api.CommitFailedException; import org.apache.jackrabbit.oak.api.PropertyState; -import org.apache.jackrabbit.oak.cache.CacheLIRS; -import org.apache.jackrabbit.oak.cache.CacheLIRS.EvictionCallback; -import org.apache.jackrabbit.oak.cache.CacheStats; -import org.apache.jackrabbit.oak.cache.CacheValue; -import org.apache.jackrabbit.oak.cache.EmpiricalWeigher; import org.apache.jackrabbit.oak.commons.PathUtils; import org.apache.jackrabbit.oak.commons.json.JsopReader; import org.apache.jackrabbit.oak.commons.json.JsopStream; @@ -69,40 +37,17 @@ import org.apache.jackrabbit.oak.commons.json.JsopTokenizer; import org.apache.jackrabbit.oak.commons.json.JsopWriter; import org.apache.jackrabbit.oak.json.JsopDiff; -import org.apache.jackrabbit.oak.plugins.blob.BlobStoreStats; -import org.apache.jackrabbit.oak.plugins.blob.CachingBlobStore; import org.apache.jackrabbit.oak.plugins.blob.ReferencedBlob; import org.apache.jackrabbit.oak.plugins.document.DocumentNodeState.Children; -import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocumentCache; -import org.apache.jackrabbit.oak.plugins.document.locks.NodeDocumentLocks; -import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobReferenceIterator; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobStore; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoMissingLastRevSeeker; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoVersionGCSupport; -import org.apache.jackrabbit.oak.plugins.document.persistentCache.CacheType; -import org.apache.jackrabbit.oak.plugins.document.persistentCache.EvictionListener; -import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCache; -import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCacheStats; +import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBBlobReferenceIterator; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBBlobStore; +import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions; import org.apache.jackrabbit.oak.plugins.document.rdb.RDBVersionGCSupport; -import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection; -import org.apache.jackrabbit.oak.plugins.document.mongo.MongoStatus; -import org.apache.jackrabbit.oak.plugins.document.util.RevisionsKey; -import 
org.apache.jackrabbit.oak.plugins.document.util.StringValue; -import org.apache.jackrabbit.oak.spi.blob.AbstractBlobStore; -import org.apache.jackrabbit.oak.spi.blob.BlobStore; import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; -import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore; import org.apache.jackrabbit.oak.spi.commit.CommitInfo; -import org.apache.jackrabbit.oak.spi.gc.GCMonitor; -import org.apache.jackrabbit.oak.spi.gc.LoggingGCMonitor; -import org.apache.jackrabbit.oak.stats.Clock; -import org.apache.jackrabbit.oak.stats.StatisticsProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,44 +54,22 @@ /** * A JSON-based wrapper around the NodeStore implementation that stores the * data in a {@link DocumentStore}. It is used for testing purpose only. + * @deprecated Use {@link DocumentNodeStore} instead. */ +@Deprecated public class DocumentMK { static final Logger LOG = LoggerFactory.getLogger(DocumentMK.class); /** - * The path where the persistent cache is stored. - */ - static final String DEFAULT_PERSISTENT_CACHE_URI = - System.getProperty("oak.documentMK.persCache"); - - /** * The threshold where special handling for many child node starts. */ - static final int MANY_CHILDREN_THRESHOLD = Integer.getInteger( - "oak.documentMK.manyChildren", 50); - + static final int MANY_CHILDREN_THRESHOLD = DocumentNodeStoreBuilder.MANY_CHILDREN_THRESHOLD; /** - * Enable or disable the LIRS cache (null to use the default setting for this configuration). - */ - static final Boolean LIRS_CACHE; - - static { - String s = System.getProperty("oak.documentMK.lirsCache"); - LIRS_CACHE = s == null ? null : Boolean.parseBoolean(s); - } - - /** - * Enable fast diff operations. - */ - static final boolean FAST_DIFF = Boolean.parseBoolean( - System.getProperty("oak.documentMK.fastDiff", "true")); - - /** * Number of content updates that need to happen before the updates * are automatically purged to the private branch. */ - static final int UPDATE_LIMIT = Integer.getInteger("update.limit", Builder.DEFAULT_UPDATE_LIMIT); + static final int UPDATE_LIMIT = DocumentNodeStoreBuilder.UPDATE_LIMIT; /** * The node store. @@ -553,201 +476,27 @@ /** * A builder for a DocumentMK instance. + * @deprecated Use {@link DocumentNodeStoreBuilder} instead or one of the + * backend implementation specific variants {@link MongoDocumentNodeStoreBuilder} + * or {@link RDBDocumentNodeStoreBuilder}. 
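// Illustration only (not part of this patch): a minimal migration sketch from the
// deprecated DocumentMK.Builder to the builder hierarchy referenced in the javadoc
// above. newDocumentNodeStoreBuilder() and build() are introduced by this change;
// the generic builder defaults to an in-memory DocumentStore, so this variant is
// mainly useful for tests. The class name below is hypothetical.
package org.apache.jackrabbit.oak.plugins.document;

public class BuilderMigrationSketch {

    public DocumentNodeStore createStore() {
        // before (deprecated): new DocumentMK.Builder().getNodeStore()
        return DocumentNodeStoreBuilder.newDocumentNodeStoreBuilder()
                .memoryCacheSize(256 * 1024 * 1024) // same setter name as on the old Builder
                .build();
    }

    // Backend-specific stores are built through MongoDocumentNodeStoreBuilder or
    // RDBDocumentNodeStoreBuilder (see the DocumentNodeStoreService hunk below);
    // their connection setters are not part of this hunk and are not shown here.
}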
+ * */ - public static class Builder { - public static final long DEFAULT_MEMORY_CACHE_SIZE = 256 * 1024 * 1024; - public static final int DEFAULT_NODE_CACHE_PERCENTAGE = 35; - public static final int DEFAULT_PREV_DOC_CACHE_PERCENTAGE = 4; - public static final int DEFAULT_CHILDREN_CACHE_PERCENTAGE = 15; - public static final int DEFAULT_DIFF_CACHE_PERCENTAGE = 30; - public static final int DEFAULT_CACHE_SEGMENT_COUNT = 16; - public static final int DEFAULT_CACHE_STACK_MOVE_DISTANCE = 16; - public static final int DEFAULT_UPDATE_LIMIT = 100000; + @Deprecated + public static class Builder extends MongoDocumentNodeStoreBuilder { + public static final long DEFAULT_MEMORY_CACHE_SIZE = DocumentNodeStoreBuilder.DEFAULT_MEMORY_CACHE_SIZE; + public static final int DEFAULT_NODE_CACHE_PERCENTAGE = DocumentNodeStoreBuilder.DEFAULT_NODE_CACHE_PERCENTAGE; + public static final int DEFAULT_PREV_DOC_CACHE_PERCENTAGE = DocumentNodeStoreBuilder.DEFAULT_PREV_DOC_CACHE_PERCENTAGE; + public static final int DEFAULT_CHILDREN_CACHE_PERCENTAGE = DocumentNodeStoreBuilder.DEFAULT_CHILDREN_CACHE_PERCENTAGE; + public static final int DEFAULT_DIFF_CACHE_PERCENTAGE = DocumentNodeStoreBuilder.DEFAULT_DIFF_CACHE_PERCENTAGE; + public static final int DEFAULT_CACHE_SEGMENT_COUNT = DocumentNodeStoreBuilder.DEFAULT_CACHE_SEGMENT_COUNT; + public static final int DEFAULT_CACHE_STACK_MOVE_DISTANCE = DocumentNodeStoreBuilder.DEFAULT_CACHE_STACK_MOVE_DISTANCE; + public static final int DEFAULT_UPDATE_LIMIT = DocumentNodeStoreBuilder.DEFAULT_UPDATE_LIMIT; private DocumentNodeStore nodeStore; - private Supplier documentStoreSupplier = ofInstance(new MemoryDocumentStore()); - private String mongoUri; - private boolean socketKeepAlive; - private MongoStatus mongoStatus; - private DiffCache diffCache; - private BlobStore blobStore; - private int clusterId = Integer.getInteger("oak.documentMK.clusterId", 0); - private int asyncDelay = 1000; - private boolean timing; - private boolean logging; - private boolean leaseCheck = true; // OAK-2739 is enabled by default also for non-osgi - private boolean isReadOnlyMode = false; - private Weigher weigher = new EmpiricalWeigher(); - private long memoryCacheSize = DEFAULT_MEMORY_CACHE_SIZE; - private int nodeCachePercentage = DEFAULT_NODE_CACHE_PERCENTAGE; - private int prevDocCachePercentage = DEFAULT_PREV_DOC_CACHE_PERCENTAGE; - private int childrenCachePercentage = DEFAULT_CHILDREN_CACHE_PERCENTAGE; - private int diffCachePercentage = DEFAULT_DIFF_CACHE_PERCENTAGE; - private int cacheSegmentCount = DEFAULT_CACHE_SEGMENT_COUNT; - private int cacheStackMoveDistance = DEFAULT_CACHE_STACK_MOVE_DISTANCE; - private boolean useSimpleRevision; - private long maxReplicationLagMillis = TimeUnit.HOURS.toMillis(6); - private boolean disableBranches; - private boolean prefetchExternalChanges; - private Clock clock = Clock.SIMPLE; - private Executor executor; - private String persistentCacheURI = DEFAULT_PERSISTENT_CACHE_URI; - private PersistentCache persistentCache; - private String journalCacheURI; - private PersistentCache journalCache; - private LeaseFailureHandler leaseFailureHandler; - private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP; - private BlobStoreStats blobStoreStats; - private CacheStats blobStoreCacheStats; - private DocumentStoreStatsCollector documentStoreStatsCollector; - private DocumentNodeStoreStatsCollector nodeStoreStatsCollector; - private Map persistentCacheStats = - new EnumMap(CacheType.class); - private boolean bundlingDisabled; - private 
JournalPropertyHandlerFactory journalPropertyHandlerFactory = - new JournalPropertyHandlerFactory(); - private int updateLimit = UPDATE_LIMIT; - private int commitValueCacheSize = 10000; - private long maxRevisionAgeMillis = DEFAULT_JOURNAL_GC_MAX_AGE_MILLIS; - private GCMonitor gcMonitor = new LoggingGCMonitor( - LoggerFactory.getLogger(VersionGarbageCollector.class)); - private Predicate nodeCachePredicate = Predicates.alwaysTrue(); public Builder() { } /** - * Uses the given information to connect to to MongoDB as backend - * storage for the DocumentNodeStore. The write concern is either - * taken from the URI or determined automatically based on the MongoDB - * setup. When running on a replica set without explicit write concern - * in the URI, the write concern will be {@code MAJORITY}, otherwise - * {@code ACKNOWLEDGED}. - * - * @param uri a MongoDB URI. - * @param name the name of the database to connect to. This overrides - * any database name given in the {@code uri}. - * @param blobCacheSizeMB the blob cache size in MB. - * @return this - * @throws UnknownHostException if one of the hosts given in the URI - * is unknown. - */ - public Builder setMongoDB(@Nonnull String uri, - @Nonnull String name, - int blobCacheSizeMB) - throws UnknownHostException { - this.mongoUri = uri; - - MongoClientOptions.Builder options = MongoConnection.getDefaultBuilder(); - options.socketKeepAlive(socketKeepAlive); - DB db = new MongoConnection(uri, options).getDB(name); - MongoStatus status = new MongoStatus(db); - if (!MongoConnection.hasWriteConcern(uri)) { - db.setWriteConcern(MongoConnection.getDefaultWriteConcern(db)); - } - if (status.isMajorityReadConcernSupported() && status.isMajorityReadConcernEnabled() && !MongoConnection.hasReadConcern(uri)) { - db.setReadConcern(MongoConnection.getDefaultReadConcern(db)); - } - setMongoDB(db, status, blobCacheSizeMB); - return this; - } - - /** - * Use the given MongoDB as backend storage for the DocumentNodeStore. - * - * @param db the MongoDB connection - * @return this - */ - public Builder setMongoDB(@Nonnull DB db, - int blobCacheSizeMB) { - return setMongoDB(db, new MongoStatus(db), blobCacheSizeMB); - } - - private Builder setMongoDB(@Nonnull DB db, - MongoStatus status, - int blobCacheSizeMB) { - if (!MongoConnection.hasSufficientWriteConcern(db)) { - LOG.warn("Insufficient write concern: " + db.getWriteConcern() - + " At least " + MongoConnection.getDefaultWriteConcern(db) + " is recommended."); - } - if (status.isMajorityReadConcernSupported() && !status.isMajorityReadConcernEnabled()) { - LOG.warn("The read concern should be enabled on mongod using --enableMajorityReadConcern"); - } else if (status.isMajorityReadConcernSupported() && !MongoConnection.hasSufficientReadConcern(db)) { - ReadConcernLevel currentLevel = readConcernLevel(db.getReadConcern()); - ReadConcernLevel recommendedLevel = readConcernLevel(MongoConnection.getDefaultReadConcern(db)); - if (currentLevel == null) { - LOG.warn("Read concern hasn't been set. At least " + recommendedLevel + " is recommended."); - } else { - LOG.warn("Insufficient read concern: " + currentLevel + ". 
At least " + recommendedLevel + " is recommended."); - } - } - - this.mongoStatus = status; - this.documentStoreSupplier = memoize(new Supplier() { - @Override - public DocumentStore get() { - return new MongoDocumentStore(db, DocumentMK.Builder.this); - } - }); - - if (this.blobStore == null) { - GarbageCollectableBlobStore s = new MongoBlobStore(db, blobCacheSizeMB * 1024 * 1024L); - setBlobStore(s); - } - return this; - } - - /** - * Enables the socket keep-alive option for MongoDB. The default is - * disabled. - * - * @param enable whether to enable it. - * @return this - */ - public Builder setSocketKeepAlive(boolean enable) { - this.socketKeepAlive = enable; - return this; - } - - private void setBlobStore(GarbageCollectableBlobStore s) { - configureBlobStore(s); - PersistentCache p = getPersistentCache(); - if (p != null) { - s = p.wrapBlobStore(s); - } - this.blobStore = s; - } - - /** - * Use the given MongoDB as backend storage for the DocumentNodeStore. - * - * @param db the MongoDB connection - * @return this - */ - public Builder setMongoDB(@Nonnull DB db) { - return setMongoDB(db, 16); - } - - /** - * Returns the Mongo URI used in the {@link #setMongoDB(String, String, int)} method. - * - * @return the Mongo URI or null if the {@link #setMongoDB(String, String, int)} method hasn't - * been called. - */ - public String getMongoUri() { - return mongoUri; - } - - /** - * Returns the status of the Mongo server configured in the {@link #setMongoDB(String, String, int)} method. - * - * @return the status or null if the {@link #setMongoDB(String, String, int)} method hasn't - * been called. - */ - public MongoStatus getMongoStatus() { - return mongoStatus; - } - - /** * Sets a {@link DataSource} to use for the RDB document and blob * stores. * @@ -768,7 +517,7 @@ this.documentStoreSupplier = ofInstance(new RDBDocumentStore(ds, this, options)); if(blobStore == null) { GarbageCollectableBlobStore s = new RDBBlobStore(ds, options); - setBlobStore(s); + setGCBlobStore(s); } return this; } @@ -783,434 +532,33 @@ this.documentStoreSupplier = ofInstance(new RDBDocumentStore(documentStoreDataSource, this)); if(blobStore == null) { GarbageCollectableBlobStore s = new RDBBlobStore(blobStoreDataSource); - setBlobStore(s); + setGCBlobStore(s); } return this; } - /** - * Sets the persistent cache option. - * - * @return this - */ - public Builder setPersistentCache(String persistentCache) { - this.persistentCacheURI = persistentCache; - return this; - } - - /** - * Sets the journal cache option. - * - * @return this - */ - public Builder setJournalCache(String journalCache) { - this.journalCacheURI = journalCache; - return this; - } - - /** - * Use the timing document store wrapper. - * - * @param timing whether to use the timing wrapper. 
- * @return this - */ - public Builder setTiming(boolean timing) { - this.timing = timing; - return this; - } - - public boolean getTiming() { - return timing; - } - - public Builder setLogging(boolean logging) { - this.logging = logging; - return this; - } - - public boolean getLogging() { - return logging; - } - - public Builder setLeaseCheck(boolean leaseCheck) { - this.leaseCheck = leaseCheck; - return this; - } - - public boolean getLeaseCheck() { - return leaseCheck; - } - - public Builder setReadOnlyMode() { - this.isReadOnlyMode = true; - return this; - } - - public boolean getReadOnlyMode() { - return isReadOnlyMode; - } - - public Builder setLeaseFailureHandler(LeaseFailureHandler leaseFailureHandler) { - this.leaseFailureHandler = leaseFailureHandler; - return this; - } - - public LeaseFailureHandler getLeaseFailureHandler() { - return leaseFailureHandler; - } - - /** - * Set the document store to use. By default an in-memory store is used. - * - * @param documentStore the document store - * @return this - */ - public Builder setDocumentStore(DocumentStore documentStore) { - this.documentStoreSupplier = ofInstance(documentStore); - return this; - } - - public DocumentStore getDocumentStore() { - return documentStoreSupplier.get(); - } - public DocumentNodeStore getNodeStore() { if (nodeStore == null) { - nodeStore = new DocumentNodeStore(this); + nodeStore = build(); } return nodeStore; } - public DiffCache getDiffCache() { - if (diffCache == null) { - diffCache = new TieredDiffCache(this); - } - return diffCache; - } - - public Builder setDiffCache(DiffCache diffCache) { - this.diffCache = diffCache; - return this; - } - - /** - * Set the blob store to use. By default an in-memory store is used. - * - * @param blobStore the blob store - * @return this - */ - public Builder setBlobStore(BlobStore blobStore) { - this.blobStore = blobStore; - return this; - } - - public BlobStore getBlobStore() { - if (blobStore == null) { - blobStore = new MemoryBlobStore(); - configureBlobStore(blobStore); - } - return blobStore; - } - - /** - * Set the cluster id to use. By default, 0 is used, meaning the cluster - * id is automatically generated. - * - * @param clusterId the cluster id - * @return this - */ - public Builder setClusterId(int clusterId) { - this.clusterId = clusterId; - return this; - } - - public Builder setCacheSegmentCount(int cacheSegmentCount) { - this.cacheSegmentCount = cacheSegmentCount; - return this; - } - - public Builder setCacheStackMoveDistance(int cacheSegmentCount) { - this.cacheStackMoveDistance = cacheSegmentCount; - return this; - } - - public int getClusterId() { - return clusterId; - } - - /** - * Set the maximum delay to write the last revision to the root node. By - * default 1000 (meaning 1 second) is used. 
- * - * @param asyncDelay in milliseconds - * @return this - */ - public Builder setAsyncDelay(int asyncDelay) { - this.asyncDelay = asyncDelay; - return this; - } - - public int getAsyncDelay() { - return asyncDelay; - } - - public Weigher getWeigher() { - return weigher; - } - - public Builder withWeigher(Weigher weigher) { - this.weigher = weigher; - return this; - } - - public Builder memoryCacheSize(long memoryCacheSize) { - this.memoryCacheSize = memoryCacheSize; - return this; - } - - public Builder memoryCacheDistribution(int nodeCachePercentage, - int prevDocCachePercentage, - int childrenCachePercentage, - int diffCachePercentage) { - checkArgument(nodeCachePercentage >= 0); - checkArgument(prevDocCachePercentage >= 0); - checkArgument(childrenCachePercentage>= 0); - checkArgument(diffCachePercentage >= 0); - checkArgument(nodeCachePercentage + prevDocCachePercentage + childrenCachePercentage + - diffCachePercentage < 100); - this.nodeCachePercentage = nodeCachePercentage; - this.prevDocCachePercentage = prevDocCachePercentage; - this.childrenCachePercentage = childrenCachePercentage; - this.diffCachePercentage = diffCachePercentage; - return this; - } - - public long getNodeCacheSize() { - return memoryCacheSize * nodeCachePercentage / 100; - } - - public long getPrevDocumentCacheSize() { - return memoryCacheSize * prevDocCachePercentage / 100; - } - - public long getChildrenCacheSize() { - return memoryCacheSize * childrenCachePercentage / 100; - } - - public long getDocumentCacheSize() { - return memoryCacheSize - getNodeCacheSize() - getPrevDocumentCacheSize() - getChildrenCacheSize() - - getDiffCacheSize(); - } - - public long getDiffCacheSize() { - return memoryCacheSize * diffCachePercentage / 100; - } - - public long getMemoryDiffCacheSize() { - return getDiffCacheSize() / 2; - } - - public long getLocalDiffCacheSize() { - return getDiffCacheSize() / 2; - } - - public Builder setUseSimpleRevision(boolean useSimpleRevision) { - this.useSimpleRevision = useSimpleRevision; - return this; - } - - public boolean isUseSimpleRevision() { - return useSimpleRevision; - } - - public Executor getExecutor() { - if(executor == null){ - return MoreExecutors.sameThreadExecutor(); - } - return executor; - } - - public Builder setExecutor(Executor executor){ - this.executor = executor; - return this; - } - - public Builder clock(Clock clock) { - this.clock = clock; - return this; - } - - public Builder setStatisticsProvider(StatisticsProvider statisticsProvider){ - this.statisticsProvider = statisticsProvider; - return this; - } - - public StatisticsProvider getStatisticsProvider() { - return this.statisticsProvider; - } - public DocumentStoreStatsCollector getDocumentStoreStatsCollector() { - if (documentStoreStatsCollector == null) { - documentStoreStatsCollector = new DocumentStoreStats(statisticsProvider); - } - return documentStoreStatsCollector; - } - - public Builder setDocumentStoreStatsCollector(DocumentStoreStatsCollector documentStoreStatsCollector) { - this.documentStoreStatsCollector = documentStoreStatsCollector; - return this; - } - - public DocumentNodeStoreStatsCollector getNodeStoreStatsCollector() { - if (nodeStoreStatsCollector == null) { - nodeStoreStatsCollector = new DocumentNodeStoreStats(statisticsProvider); - } - return nodeStoreStatsCollector; - } - - public Builder setNodeStoreStatsCollector(DocumentNodeStoreStatsCollector statsCollector) { - this.nodeStoreStatsCollector = statsCollector; - return this; - } - - @Nonnull - public Map getPersistenceCacheStats() { 
- return persistentCacheStats; - } - - @CheckForNull - public BlobStoreStats getBlobStoreStats() { - return blobStoreStats; - } - - @CheckForNull - public CacheStats getBlobStoreCacheStats() { - return blobStoreCacheStats; - } - - public Clock getClock() { - return clock; - } - - public Builder setMaxReplicationLag(long duration, TimeUnit unit){ - maxReplicationLagMillis = unit.toMillis(duration); - return this; - } - - public long getMaxReplicationLagMillis() { - return maxReplicationLagMillis; - } - - public Builder disableBranches() { - disableBranches = true; - return this; - } - - public boolean isDisableBranches() { - return disableBranches; - } - - public Builder setBundlingDisabled(boolean enabled) { - bundlingDisabled = enabled; - return this; - } - - public boolean isBundlingDisabled() { - return bundlingDisabled; - } - - public Builder setPrefetchExternalChanges(boolean b) { - prefetchExternalChanges = b; - return this; - } - - public boolean isPrefetchExternalChanges() { - return prefetchExternalChanges; - } - - public Builder setJournalPropertyHandlerFactory(JournalPropertyHandlerFactory factory) { - journalPropertyHandlerFactory = factory; - return this; - } - - public JournalPropertyHandlerFactory getJournalPropertyHandlerFactory() { - return journalPropertyHandlerFactory; - } - - public Builder setUpdateLimit(int limit) { - updateLimit = limit; - return this; - } - - public int getUpdateLimit() { - return updateLimit; - } - - public Builder setCommitValueCacheSize(int cacheSize) { - this.commitValueCacheSize = cacheSize; - return this; - } - - public int getCommitValueCacheSize() { - return commitValueCacheSize; - } - - public Builder setJournalGCMaxAge(long maxRevisionAgeMillis) { - this.maxRevisionAgeMillis = maxRevisionAgeMillis; - return this; - } - - /** - * The maximum age for journal entries in milliseconds. Older entries - * are candidates for GC. - * - * @return maximum age for journal entries in milliseconds. 
- */ - public long getJournalGCMaxAge() { - return maxRevisionAgeMillis; - } - - public Builder setGCMonitor(@Nonnull GCMonitor gcMonitor) { - this.gcMonitor = checkNotNull(gcMonitor); - return this; - } - - public GCMonitor getGCMonitor() { - return gcMonitor; - } - - VersionGCSupport createVersionGCSupport() { + public VersionGCSupport createVersionGCSupport() { DocumentStore store = getDocumentStore(); - if (store instanceof MongoDocumentStore) { - return new MongoVersionGCSupport((MongoDocumentStore) store); - } else if (store instanceof RDBDocumentStore) { + if (store instanceof RDBDocumentStore) { return new RDBVersionGCSupport((RDBDocumentStore) store); } else { - return new VersionGCSupport(store); + return super.createVersionGCSupport(); } } - Iterable createReferencedBlobs(final DocumentNodeStore ns) { + public Iterable createReferencedBlobs(DocumentNodeStore ns) { final DocumentStore store = getDocumentStore(); - return new Iterable() { - @Override - public Iterator iterator() { - if (store instanceof MongoDocumentStore) { - return new MongoBlobReferenceIterator(ns, (MongoDocumentStore) store); - } else if (store instanceof RDBDocumentStore) { - return new RDBBlobReferenceIterator(ns, (RDBDocumentStore) store); - } else { - return new BlobReferenceIterator(ns); - } - } - }; - } - - public MissingLastRevSeeker createMissingLastRevSeeker() { - final DocumentStore store = getDocumentStore(); - if (store instanceof MongoDocumentStore) { - return new MongoMissingLastRevSeeker((MongoDocumentStore) store, getClock()); + if (store instanceof RDBDocumentStore) { + return () -> new RDBBlobReferenceIterator(ns, (RDBDocumentStore) store); } else { - return new MissingLastRevSeeker(store, getClock()); + return super.createReferencedBlobs(ns); } } @@ -1222,181 +570,5 @@ public DocumentMK open() { return new DocumentMK(this); } - - public Cache buildNodeCache(DocumentNodeStore store) { - return buildCache(CacheType.NODE, getNodeCacheSize(), store, null); - } - - public Cache buildChildrenCache(DocumentNodeStore store) { - return buildCache(CacheType.CHILDREN, getChildrenCacheSize(), store, null); - } - - public Cache buildMemoryDiffCache() { - return buildCache(CacheType.DIFF, getMemoryDiffCacheSize(), null, null); - } - - public Cache buildLocalDiffCache() { - return buildCache(CacheType.LOCAL_DIFF, getLocalDiffCacheSize(), null, null); - } - - public Cache buildDocumentCache(DocumentStore docStore) { - return buildCache(CacheType.DOCUMENT, getDocumentCacheSize(), null, docStore); - } - - public Cache buildPrevDocumentsCache(DocumentStore docStore) { - return buildCache(CacheType.PREV_DOCUMENT, getPrevDocumentCacheSize(), null, docStore); - } - - public NodeDocumentCache buildNodeDocumentCache(DocumentStore docStore, NodeDocumentLocks locks) { - Cache nodeDocumentsCache = buildDocumentCache(docStore); - CacheStats nodeDocumentsCacheStats = new CacheStats(nodeDocumentsCache, "Document-Documents", getWeigher(), getDocumentCacheSize()); - - Cache prevDocumentsCache = buildPrevDocumentsCache(docStore); - CacheStats prevDocumentsCacheStats = new CacheStats(prevDocumentsCache, "Document-PrevDocuments", getWeigher(), getPrevDocumentCacheSize()); - - return new NodeDocumentCache(nodeDocumentsCache, nodeDocumentsCacheStats, prevDocumentsCache, prevDocumentsCacheStats, locks); - } - - public Builder setNodeCachePredicate(Predicate p){ - this.nodeCachePredicate = p; - return this; - } - - public Predicate getNodeCachePredicate() { - return nodeCachePredicate; - } - - @SuppressWarnings("unchecked") - 
private Cache buildCache( - CacheType cacheType, - long maxWeight, - DocumentNodeStore docNodeStore, - DocumentStore docStore - ) { - Set> listeners = new CopyOnWriteArraySet>(); - Cache cache = buildCache(cacheType.name(), maxWeight, listeners); - PersistentCache p = null; - if (cacheType == CacheType.DIFF || cacheType == CacheType.LOCAL_DIFF) { - // use separate journal cache if configured - p = getJournalCache(); - } - if (p == null) { - // otherwise fall back to single persistent cache - p = getPersistentCache(); - } - if (p != null) { - cache = p.wrap(docNodeStore, docStore, cache, cacheType, statisticsProvider); - if (cache instanceof EvictionListener) { - listeners.add((EvictionListener) cache); - } - PersistentCacheStats stats = PersistentCache.getPersistentCacheStats(cache); - if (stats != null) { - persistentCacheStats.put(cacheType, stats); - } - } - return cache; - } - - public PersistentCache getPersistentCache() { - if (persistentCacheURI == null) { - return null; - } - if (persistentCache == null) { - try { - persistentCache = new PersistentCache(persistentCacheURI); - } catch (Throwable e) { - LOG.warn("Persistent cache not available; please disable the configuration", e); - throw new IllegalArgumentException(e); - } - } - return persistentCache; - } - - PersistentCache getJournalCache() { - if (journalCacheURI == null) { - return null; - } - if (journalCache == null) { - try { - journalCache = new PersistentCache(journalCacheURI); - } catch (Throwable e) { - LOG.warn("Journal cache not available; please disable the configuration", e); - throw new IllegalArgumentException(e); - } - } - return journalCache; - } - - private Cache buildCache( - String module, - long maxWeight, - final Set> listeners) { - // by default, use the LIRS cache when using the persistent cache, - // but don't use it otherwise - boolean useLirs = persistentCacheURI != null; - // allow to override this by using the system property - if (LIRS_CACHE != null) { - useLirs = LIRS_CACHE; - } - // do not use LIRS cache when maxWeight is zero (OAK-6953) - if (useLirs && maxWeight > 0) { - return CacheLIRS.newBuilder(). - module(module). - weigher(new Weigher() { - @Override - public int weigh(K key, V value) { - return weigher.weigh(key, value); - } - }). - averageWeight(2000). - maximumWeight(maxWeight). - segmentCount(cacheSegmentCount). - stackMoveDistance(cacheStackMoveDistance). - recordStats(). - evictionCallback(new EvictionCallback() { - @Override - public void evicted(K key, V value, RemovalCause cause) { - for (EvictionListener l : listeners) { - l.evicted(key, value, cause); - } - } - }). - build(); - } - return CacheBuilder.newBuilder(). - concurrencyLevel(cacheSegmentCount). - weigher(weigher). - maximumWeight(maxWeight). - recordStats(). - removalListener(new RemovalListener() { - @Override - public void onRemoval(RemovalNotification notification) { - for (EvictionListener l : listeners) { - l.evicted(notification.getKey(), notification.getValue(), notification.getCause()); - } - } - }). - build(); - } - - /** - * BlobStore which are created by builder might get wrapped. 
- * So here we perform any configuration and also access any - * service exposed by the store - * - * @param blobStore store to config - */ - private void configureBlobStore(BlobStore blobStore) { - if (blobStore instanceof AbstractBlobStore){ - this.blobStoreStats = new BlobStoreStats(statisticsProvider); - ((AbstractBlobStore) blobStore).setStatsCollector(blobStoreStats); - } - - if (blobStore instanceof CachingBlobStore){ - blobStoreCacheStats = ((CachingBlobStore) blobStore).getCacheStats(); - } - } - } - } Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java (working copy) @@ -32,8 +32,7 @@ import static org.apache.jackrabbit.oak.plugins.document.Collection.CLUSTER_NODES; import static org.apache.jackrabbit.oak.plugins.document.Collection.JOURNAL; import static org.apache.jackrabbit.oak.plugins.document.Collection.NODES; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.FAST_DIFF; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.MANY_CHILDREN_THRESHOLD; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.MANY_CHILDREN_THRESHOLD; import static org.apache.jackrabbit.oak.plugins.document.NodeDocument.MODIFIED_IN_SECS_RESOLUTION; import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key; import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Operation; @@ -159,6 +158,12 @@ ); /** + * Enable fast diff operations. + */ + private static final boolean FAST_DIFF = Boolean.parseBoolean( + System.getProperty("oak.documentMK.fastDiff", "true")); + + /** * Feature flag to enable concurrent add/remove operations of hidden empty * nodes. See OAK-2673. */ @@ -507,7 +512,7 @@ private final Predicate nodeCachePredicate; - public DocumentNodeStore(DocumentMK.Builder builder) { + public > DocumentNodeStore(DocumentNodeStoreBuilder builder) { this.nodeCachePredicate = builder.getNodeCachePredicate(); this.updateLimit = builder.getUpdateLimit(); this.commitValueResolver = new CommitValueResolver(builder.getCommitValueCacheSize(), @@ -2995,7 +3000,7 @@ return mbean; } - private DocumentNodeStoreMBean createMBean(DocumentMK.Builder builder) { + private DocumentNodeStoreMBean createMBean(DocumentNodeStoreBuilder builder) { try { return new DocumentNodeStoreMBeanImpl(this, builder.getStatisticsProvider().getStats(), Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java (working copy) @@ -452,7 +452,7 @@ * as the base of this branch *
<li>{@link Persisted} on {@link #setRoot(NodeState)} if the number of * changes counted from the base to the new root reaches - * {@link DocumentMK.Builder#getUpdateLimit()}.</li>
+ * {@link DocumentNodeStoreBuilder#getUpdateLimit()}.</li> *
<li>{@link Merged} on {@link BranchState#merge(CommitHook, CommitInfo, boolean)}</li>
  • * */ Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java (nonexistent) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java (working copy) @@ -0,0 +1,750 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document; + +import java.util.EnumMap; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.Executor; + +import javax.annotation.CheckForNull; +import javax.annotation.Nonnull; + +import com.google.common.base.Predicate; +import com.google.common.base.Predicates; +import com.google.common.base.Supplier; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalCause; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; +import com.google.common.util.concurrent.MoreExecutors; + +import org.apache.jackrabbit.oak.cache.CacheLIRS; +import org.apache.jackrabbit.oak.cache.CacheStats; +import org.apache.jackrabbit.oak.cache.CacheValue; +import org.apache.jackrabbit.oak.cache.EmpiricalWeigher; +import org.apache.jackrabbit.oak.plugins.blob.BlobStoreStats; +import org.apache.jackrabbit.oak.plugins.blob.CachingBlobStore; +import org.apache.jackrabbit.oak.plugins.blob.ReferencedBlob; +import org.apache.jackrabbit.oak.plugins.document.cache.NodeDocumentCache; +import org.apache.jackrabbit.oak.plugins.document.locks.NodeDocumentLocks; +import org.apache.jackrabbit.oak.plugins.document.memory.MemoryDocumentStore; +import org.apache.jackrabbit.oak.plugins.document.persistentCache.CacheType; +import org.apache.jackrabbit.oak.plugins.document.persistentCache.EvictionListener; +import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCache; +import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCacheStats; +import org.apache.jackrabbit.oak.plugins.document.util.RevisionsKey; +import org.apache.jackrabbit.oak.plugins.document.util.StringValue; +import org.apache.jackrabbit.oak.spi.blob.AbstractBlobStore; +import org.apache.jackrabbit.oak.spi.blob.BlobStore; +import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; +import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore; +import org.apache.jackrabbit.oak.spi.gc.GCMonitor; +import org.apache.jackrabbit.oak.spi.gc.LoggingGCMonitor; +import org.apache.jackrabbit.oak.stats.Clock; +import 
org.apache.jackrabbit.oak.stats.StatisticsProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Suppliers.ofInstance; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreService.DEFAULT_JOURNAL_GC_MAX_AGE_MILLIS; + +/** + * A generic builder for a {@link DocumentNodeStore}. By default the builder + * will create an in-memory {@link DocumentNodeStore}. In most cases this is + * only useful for tests. + */ +public class DocumentNodeStoreBuilder> { + + private static final Logger LOG = LoggerFactory.getLogger(DocumentNodeStoreBuilder.class); + + public static final long DEFAULT_MEMORY_CACHE_SIZE = 256 * 1024 * 1024; + public static final int DEFAULT_NODE_CACHE_PERCENTAGE = 35; + public static final int DEFAULT_PREV_DOC_CACHE_PERCENTAGE = 4; + public static final int DEFAULT_CHILDREN_CACHE_PERCENTAGE = 15; + public static final int DEFAULT_DIFF_CACHE_PERCENTAGE = 30; + public static final int DEFAULT_CACHE_SEGMENT_COUNT = 16; + public static final int DEFAULT_CACHE_STACK_MOVE_DISTANCE = 16; + public static final int DEFAULT_UPDATE_LIMIT = 100000; + + /** + * The path where the persistent cache is stored. + */ + private static final String DEFAULT_PERSISTENT_CACHE_URI = + System.getProperty("oak.documentMK.persCache"); + + /** + * The threshold where special handling for many child node starts. + */ + static final int MANY_CHILDREN_THRESHOLD = Integer.getInteger( + "oak.documentMK.manyChildren", 50); + + /** + * Enable or disable the LIRS cache (null to use the default setting for this configuration). + */ + private static final Boolean LIRS_CACHE; + + static { + String s = System.getProperty("oak.documentMK.lirsCache"); + LIRS_CACHE = s == null ? null : Boolean.parseBoolean(s); + } + + /** + * Enable fast diff operations. + */ + static final boolean FAST_DIFF = Boolean.parseBoolean( + System.getProperty("oak.documentMK.fastDiff", "true")); + + /** + * Number of content updates that need to happen before the updates + * are automatically purged to the private branch. 
+ */ + static final int UPDATE_LIMIT = Integer.getInteger("update.limit", DEFAULT_UPDATE_LIMIT); + + protected Supplier documentStoreSupplier = ofInstance(new MemoryDocumentStore()); + protected BlobStore blobStore; + private DiffCache diffCache; + private int clusterId = Integer.getInteger("oak.documentMK.clusterId", 0); + private int asyncDelay = 1000; + private boolean timing; + private boolean logging; + private boolean leaseCheck = true; // OAK-2739 is enabled by default also for non-osgi + private boolean isReadOnlyMode = false; + private Weigher weigher = new EmpiricalWeigher(); + private long memoryCacheSize = DEFAULT_MEMORY_CACHE_SIZE; + private int nodeCachePercentage = DEFAULT_NODE_CACHE_PERCENTAGE; + private int prevDocCachePercentage = DEFAULT_PREV_DOC_CACHE_PERCENTAGE; + private int childrenCachePercentage = DEFAULT_CHILDREN_CACHE_PERCENTAGE; + private int diffCachePercentage = DEFAULT_DIFF_CACHE_PERCENTAGE; + private int cacheSegmentCount = DEFAULT_CACHE_SEGMENT_COUNT; + private int cacheStackMoveDistance = DEFAULT_CACHE_STACK_MOVE_DISTANCE; + private boolean useSimpleRevision; + private boolean disableBranches; + private boolean prefetchExternalChanges; + private Clock clock = Clock.SIMPLE; + private Executor executor; + private String persistentCacheURI = DEFAULT_PERSISTENT_CACHE_URI; + private PersistentCache persistentCache; + private String journalCacheURI; + private PersistentCache journalCache; + private LeaseFailureHandler leaseFailureHandler; + private StatisticsProvider statisticsProvider = StatisticsProvider.NOOP; + private BlobStoreStats blobStoreStats; + private CacheStats blobStoreCacheStats; + private DocumentStoreStatsCollector documentStoreStatsCollector; + private DocumentNodeStoreStatsCollector nodeStoreStatsCollector; + private Map persistentCacheStats = + new EnumMap(CacheType.class); + private boolean bundlingDisabled; + private JournalPropertyHandlerFactory journalPropertyHandlerFactory = + new JournalPropertyHandlerFactory(); + private int updateLimit = UPDATE_LIMIT; + private int commitValueCacheSize = 10000; + private long maxRevisionAgeMillis = DEFAULT_JOURNAL_GC_MAX_AGE_MILLIS; + private GCMonitor gcMonitor = new LoggingGCMonitor( + LoggerFactory.getLogger(VersionGarbageCollector.class)); + private Predicate nodeCachePredicate = Predicates.alwaysTrue(); + + /** + * @return a new {@link DocumentNodeStoreBuilder}. + */ + public static DocumentNodeStoreBuilder newDocumentNodeStoreBuilder() { + return new DocumentNodeStoreBuilder(); + } + + public DocumentNodeStore build() { + return new DocumentNodeStore(this); + } + + @SuppressWarnings("unchecked") + protected T thisBuilder() { + return (T) this; + } + + /** + * Sets the persistent cache option. + * + * @return this + */ + public T setPersistentCache(String persistentCache) { + this.persistentCacheURI = persistentCache; + return thisBuilder(); + } + + /** + * Sets the journal cache option. + * + * @return this + */ + public T setJournalCache(String journalCache) { + this.journalCacheURI = journalCache; + return thisBuilder(); + } + + /** + * Use the timing document store wrapper. + * + * @param timing whether to use the timing wrapper. 
+ * @return this + */ + public T setTiming(boolean timing) { + this.timing = timing; + return thisBuilder(); + } + + public boolean getTiming() { + return timing; + } + + public T setLogging(boolean logging) { + this.logging = logging; + return thisBuilder(); + } + + public boolean getLogging() { + return logging; + } + + public T setLeaseCheck(boolean leaseCheck) { + this.leaseCheck = leaseCheck; + return thisBuilder(); + } + + public boolean getLeaseCheck() { + return leaseCheck; + } + + public T setReadOnlyMode() { + this.isReadOnlyMode = true; + return thisBuilder(); + } + + public boolean getReadOnlyMode() { + return isReadOnlyMode; + } + + public T setLeaseFailureHandler(LeaseFailureHandler leaseFailureHandler) { + this.leaseFailureHandler = leaseFailureHandler; + return thisBuilder(); + } + + public LeaseFailureHandler getLeaseFailureHandler() { + return leaseFailureHandler; + } + + /** + * Set the document store to use. By default an in-memory store is used. + * + * @param documentStore the document store + * @return this + */ + public T setDocumentStore(DocumentStore documentStore) { + this.documentStoreSupplier = ofInstance(documentStore); + return thisBuilder(); + } + + public DocumentStore getDocumentStore() { + return documentStoreSupplier.get(); + } + + public DiffCache getDiffCache() { + if (diffCache == null) { + diffCache = new TieredDiffCache(this); + } + return diffCache; + } + + public T setDiffCache(DiffCache diffCache) { + this.diffCache = diffCache; + return thisBuilder(); + } + + /** + * Set the blob store to use. By default an in-memory store is used. + * + * @param blobStore the blob store + * @return this + */ + public T setBlobStore(BlobStore blobStore) { + this.blobStore = blobStore; + return thisBuilder(); + } + + public BlobStore getBlobStore() { + if (blobStore == null) { + blobStore = new MemoryBlobStore(); + configureBlobStore(blobStore); + } + return blobStore; + } + + /** + * Set the cluster id to use. By default, 0 is used, meaning the cluster + * id is automatically generated. + * + * @param clusterId the cluster id + * @return this + */ + public T setClusterId(int clusterId) { + this.clusterId = clusterId; + return thisBuilder(); + } + + public T setCacheSegmentCount(int cacheSegmentCount) { + this.cacheSegmentCount = cacheSegmentCount; + return thisBuilder(); + } + + public T setCacheStackMoveDistance(int cacheSegmentCount) { + this.cacheStackMoveDistance = cacheSegmentCount; + return thisBuilder(); + } + + public int getClusterId() { + return clusterId; + } + + /** + * Set the maximum delay to write the last revision to the root node. By + * default 1000 (meaning 1 second) is used. 
+ * + * @param asyncDelay in milliseconds + * @return this + */ + public T setAsyncDelay(int asyncDelay) { + this.asyncDelay = asyncDelay; + return thisBuilder(); + } + + public int getAsyncDelay() { + return asyncDelay; + } + + public Weigher getWeigher() { + return weigher; + } + + public T withWeigher(Weigher weigher) { + this.weigher = weigher; + return thisBuilder(); + } + + public T memoryCacheSize(long memoryCacheSize) { + this.memoryCacheSize = memoryCacheSize; + return thisBuilder(); + } + + public T memoryCacheDistribution(int nodeCachePercentage, + int prevDocCachePercentage, + int childrenCachePercentage, + int diffCachePercentage) { + checkArgument(nodeCachePercentage >= 0); + checkArgument(prevDocCachePercentage >= 0); + checkArgument(childrenCachePercentage>= 0); + checkArgument(diffCachePercentage >= 0); + checkArgument(nodeCachePercentage + prevDocCachePercentage + childrenCachePercentage + + diffCachePercentage < 100); + this.nodeCachePercentage = nodeCachePercentage; + this.prevDocCachePercentage = prevDocCachePercentage; + this.childrenCachePercentage = childrenCachePercentage; + this.diffCachePercentage = diffCachePercentage; + return thisBuilder(); + } + + public long getNodeCacheSize() { + return memoryCacheSize * nodeCachePercentage / 100; + } + + public long getPrevDocumentCacheSize() { + return memoryCacheSize * prevDocCachePercentage / 100; + } + + public long getChildrenCacheSize() { + return memoryCacheSize * childrenCachePercentage / 100; + } + + public long getDocumentCacheSize() { + return memoryCacheSize - getNodeCacheSize() - getPrevDocumentCacheSize() - getChildrenCacheSize() + - getDiffCacheSize(); + } + + public long getDiffCacheSize() { + return memoryCacheSize * diffCachePercentage / 100; + } + + public long getMemoryDiffCacheSize() { + return getDiffCacheSize() / 2; + } + + public long getLocalDiffCacheSize() { + return getDiffCacheSize() / 2; + } + + public T setUseSimpleRevision(boolean useSimpleRevision) { + this.useSimpleRevision = useSimpleRevision; + return thisBuilder(); + } + + public boolean isUseSimpleRevision() { + return useSimpleRevision; + } + + public Executor getExecutor() { + if(executor == null){ + return MoreExecutors.sameThreadExecutor(); + } + return executor; + } + + public T setExecutor(Executor executor){ + this.executor = executor; + return thisBuilder(); + } + + public T clock(Clock clock) { + this.clock = clock; + return thisBuilder(); + } + + public T setStatisticsProvider(StatisticsProvider statisticsProvider){ + this.statisticsProvider = statisticsProvider; + return thisBuilder(); + } + + public StatisticsProvider getStatisticsProvider() { + return this.statisticsProvider; + } + public DocumentStoreStatsCollector getDocumentStoreStatsCollector() { + if (documentStoreStatsCollector == null) { + documentStoreStatsCollector = new DocumentStoreStats(statisticsProvider); + } + return documentStoreStatsCollector; + } + + public T setDocumentStoreStatsCollector(DocumentStoreStatsCollector documentStoreStatsCollector) { + this.documentStoreStatsCollector = documentStoreStatsCollector; + return thisBuilder(); + } + + public DocumentNodeStoreStatsCollector getNodeStoreStatsCollector() { + if (nodeStoreStatsCollector == null) { + nodeStoreStatsCollector = new DocumentNodeStoreStats(statisticsProvider); + } + return nodeStoreStatsCollector; + } + + public T setNodeStoreStatsCollector(DocumentNodeStoreStatsCollector statsCollector) { + this.nodeStoreStatsCollector = statsCollector; + return thisBuilder(); + } + + @Nonnull + public 
Map getPersistenceCacheStats() { + return persistentCacheStats; + } + + @CheckForNull + public BlobStoreStats getBlobStoreStats() { + return blobStoreStats; + } + + @CheckForNull + public CacheStats getBlobStoreCacheStats() { + return blobStoreCacheStats; + } + + public Clock getClock() { + return clock; + } + + public T disableBranches() { + disableBranches = true; + return thisBuilder(); + } + + public boolean isDisableBranches() { + return disableBranches; + } + + public T setBundlingDisabled(boolean enabled) { + bundlingDisabled = enabled; + return thisBuilder(); + } + + public boolean isBundlingDisabled() { + return bundlingDisabled; + } + + public T setPrefetchExternalChanges(boolean b) { + prefetchExternalChanges = b; + return thisBuilder(); + } + + public boolean isPrefetchExternalChanges() { + return prefetchExternalChanges; + } + + public T setJournalPropertyHandlerFactory(JournalPropertyHandlerFactory factory) { + journalPropertyHandlerFactory = factory; + return thisBuilder(); + } + + public JournalPropertyHandlerFactory getJournalPropertyHandlerFactory() { + return journalPropertyHandlerFactory; + } + + public T setUpdateLimit(int limit) { + updateLimit = limit; + return thisBuilder(); + } + + public int getUpdateLimit() { + return updateLimit; + } + + public T setCommitValueCacheSize(int cacheSize) { + this.commitValueCacheSize = cacheSize; + return thisBuilder(); + } + + public int getCommitValueCacheSize() { + return commitValueCacheSize; + } + + public T setJournalGCMaxAge(long maxRevisionAgeMillis) { + this.maxRevisionAgeMillis = maxRevisionAgeMillis; + return thisBuilder(); + } + + /** + * The maximum age for journal entries in milliseconds. Older entries + * are candidates for GC. + * + * @return maximum age for journal entries in milliseconds. 
+ */ + public long getJournalGCMaxAge() { + return maxRevisionAgeMillis; + } + + public T setGCMonitor(@Nonnull GCMonitor gcMonitor) { + this.gcMonitor = checkNotNull(gcMonitor); + return thisBuilder(); + } + + public GCMonitor getGCMonitor() { + return gcMonitor; + } + + public VersionGCSupport createVersionGCSupport() { + return new VersionGCSupport(getDocumentStore()); + } + + public Iterable createReferencedBlobs(final DocumentNodeStore ns) { + return () -> new BlobReferenceIterator(ns); + } + + public MissingLastRevSeeker createMissingLastRevSeeker() { + return new MissingLastRevSeeker(getDocumentStore(), getClock()); + } + + public Cache buildNodeCache(DocumentNodeStore store) { + return buildCache(CacheType.NODE, getNodeCacheSize(), store, null); + } + + public Cache buildChildrenCache(DocumentNodeStore store) { + return buildCache(CacheType.CHILDREN, getChildrenCacheSize(), store, null); + } + + public Cache buildMemoryDiffCache() { + return buildCache(CacheType.DIFF, getMemoryDiffCacheSize(), null, null); + } + + public Cache buildLocalDiffCache() { + return buildCache(CacheType.LOCAL_DIFF, getLocalDiffCacheSize(), null, null); + } + + public Cache buildDocumentCache(DocumentStore docStore) { + return buildCache(CacheType.DOCUMENT, getDocumentCacheSize(), null, docStore); + } + + public Cache buildPrevDocumentsCache(DocumentStore docStore) { + return buildCache(CacheType.PREV_DOCUMENT, getPrevDocumentCacheSize(), null, docStore); + } + + public NodeDocumentCache buildNodeDocumentCache(DocumentStore docStore, NodeDocumentLocks locks) { + Cache nodeDocumentsCache = buildDocumentCache(docStore); + CacheStats nodeDocumentsCacheStats = new CacheStats(nodeDocumentsCache, "Document-Documents", getWeigher(), getDocumentCacheSize()); + + Cache prevDocumentsCache = buildPrevDocumentsCache(docStore); + CacheStats prevDocumentsCacheStats = new CacheStats(prevDocumentsCache, "Document-PrevDocuments", getWeigher(), getPrevDocumentCacheSize()); + + return new NodeDocumentCache(nodeDocumentsCache, nodeDocumentsCacheStats, prevDocumentsCache, prevDocumentsCacheStats, locks); + } + + public T setNodeCachePredicate(Predicate p){ + this.nodeCachePredicate = p; + return thisBuilder(); + } + + public Predicate getNodeCachePredicate() { + return nodeCachePredicate; + } + + @SuppressWarnings("unchecked") + private Cache buildCache( + CacheType cacheType, + long maxWeight, + DocumentNodeStore docNodeStore, + DocumentStore docStore) { + Set> listeners = new CopyOnWriteArraySet>(); + Cache cache = buildCache(cacheType.name(), maxWeight, listeners); + PersistentCache p = null; + if (cacheType == CacheType.DIFF || cacheType == CacheType.LOCAL_DIFF) { + // use separate journal cache if configured + p = getJournalCache(); + } + if (p == null) { + // otherwise fall back to single persistent cache + p = getPersistentCache(); + } + if (p != null) { + cache = p.wrap(docNodeStore, docStore, cache, cacheType, statisticsProvider); + if (cache instanceof EvictionListener) { + listeners.add((EvictionListener) cache); + } + PersistentCacheStats stats = PersistentCache.getPersistentCacheStats(cache); + if (stats != null) { + persistentCacheStats.put(cacheType, stats); + } + } + return cache; + } + + public PersistentCache getPersistentCache() { + if (persistentCacheURI == null) { + return null; + } + if (persistentCache == null) { + try { + persistentCache = new PersistentCache(persistentCacheURI); + } catch (Throwable e) { + LOG.warn("Persistent cache not available; please disable the configuration", e); + throw new 
IllegalArgumentException(e); + } + } + return persistentCache; + } + + PersistentCache getJournalCache() { + if (journalCacheURI == null) { + return null; + } + if (journalCache == null) { + try { + journalCache = new PersistentCache(journalCacheURI); + } catch (Throwable e) { + LOG.warn("Journal cache not available; please disable the configuration", e); + throw new IllegalArgumentException(e); + } + } + return journalCache; + } + + private Cache buildCache( + String module, + long maxWeight, + final Set> listeners) { + // by default, use the LIRS cache when using the persistent cache, + // but don't use it otherwise + boolean useLirs = persistentCacheURI != null; + // allow to override this by using the system property + if (LIRS_CACHE != null) { + useLirs = LIRS_CACHE; + } + // do not use LIRS cache when maxWeight is zero (OAK-6953) + if (useLirs && maxWeight > 0) { + return CacheLIRS.newBuilder(). + module(module). + weigher(new Weigher() { + @Override + public int weigh(K key, V value) { + return weigher.weigh(key, value); + } + }). + averageWeight(2000). + maximumWeight(maxWeight). + segmentCount(cacheSegmentCount). + stackMoveDistance(cacheStackMoveDistance). + recordStats(). + evictionCallback(new CacheLIRS.EvictionCallback() { + @Override + public void evicted(K key, V value, RemovalCause cause) { + for (EvictionListener l : listeners) { + l.evicted(key, value, cause); + } + } + }). + build(); + } + return CacheBuilder.newBuilder(). + concurrencyLevel(cacheSegmentCount). + weigher(weigher). + maximumWeight(maxWeight). + recordStats(). + removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification notification) { + for (EvictionListener l : listeners) { + l.evicted(notification.getKey(), notification.getValue(), notification.getCause()); + } + } + }). + build(); + } + + protected void setGCBlobStore(GarbageCollectableBlobStore s) { + configureBlobStore(s); + PersistentCache p = getPersistentCache(); + if (p != null) { + s = p.wrapBlobStore(s); + } + this.blobStore = s; + } + + /** + * BlobStore which are created by builder might get wrapped. 
+ * So here we perform any configuration and also access any + * service exposed by the store + * + * @param blobStore store to config + */ + private void configureBlobStore(BlobStore blobStore) { + if (blobStore instanceof AbstractBlobStore){ + this.blobStoreStats = new BlobStoreStats(statisticsProvider); + ((AbstractBlobStore) blobStore).setStatsCollector(blobStoreStats); + } + + if (blobStore instanceof CachingBlobStore){ + blobStoreCacheStats = ((CachingBlobStore) blobStore).getCacheStats(); + } + } +} Property changes on: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBuilder.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreService.java (working copy) @@ -23,8 +23,10 @@ import static java.util.Collections.emptyList; import static org.apache.jackrabbit.oak.commons.IOUtils.closeQuietly; import static org.apache.jackrabbit.oak.commons.PropertiesUtil.toLong; -import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.Builder.DEFAULT_MEMORY_CACHE_SIZE; +import static org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder.DEFAULT_MEMORY_CACHE_SIZE; import static org.apache.jackrabbit.oak.plugins.document.NodeDocument.MODIFIED_IN_SECS_RESOLUTION; +import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder; +import static org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder.newRDBDocumentNodeStoreBuilder; import static org.apache.jackrabbit.oak.spi.blob.osgi.SplitBlobStoreService.ONLY_STANDALONE_TARGET; import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.registerMBean; import static org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardUtils.scheduleWithFixedDelay; @@ -63,6 +65,8 @@ import org.apache.jackrabbit.oak.api.jmx.PersistentCacheStatsMBean; import org.apache.jackrabbit.oak.cache.CacheStats; import org.apache.jackrabbit.oak.plugins.document.VersionGarbageCollector.VersionGCStats; +import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder; +import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder; import org.apache.jackrabbit.oak.plugins.document.util.Utils; import org.apache.jackrabbit.oak.spi.commit.ObserverTracker; import org.apache.jackrabbit.oak.osgi.OsgiWhiteboard; @@ -248,77 +252,23 @@ } private void registerNodeStore() throws IOException { - String persistentCache = resolvePath(config.persistentCache(), DEFAULT_PERSISTENT_CACHE); - String journalCache = resolvePath(config.journalCache(), DEFAULT_JOURNAL_CACHE); - DocumentMK.Builder mkBuilder = - new DocumentMK.Builder(). - setStatisticsProvider(statisticsProvider). - memoryCacheSize(config.cache() * MB). - memoryCacheDistribution( - config.nodeCachePercentage(), - config.prevDocCachePercentage(), - config.childrenCachePercentage(), - config.diffCachePercentage()). - setCacheSegmentCount(config.cacheSegmentCount()). - setCacheStackMoveDistance(config.cacheStackMoveDistance()). 
- setBundlingDisabled(config.bundlingDisabled()). - setJournalPropertyHandlerFactory(journalPropertyHandlerFactory). - setLeaseCheck(!ClusterNodeInfo.DEFAULT_LEASE_CHECK_DISABLED /* OAK-2739: enabled by default */). - setLeaseFailureHandler(new LeaseFailureHandler() { - - @Override - public void handleLeaseFailure() { - try { - // plan A: try stopping oak-core - log.error("handleLeaseFailure: stopping oak-core..."); - Bundle bundle = context.getBundleContext().getBundle(); - bundle.stop(Bundle.STOP_TRANSIENT); - log.error("handleLeaseFailure: stopped oak-core."); - // plan A worked, perfect! - } catch (BundleException e) { - log.error("handleLeaseFailure: exception while stopping oak-core: "+e, e); - // plan B: stop only DocumentNodeStoreService (to stop the background threads) - log.error("handleLeaseFailure: stopping DocumentNodeStoreService..."); - context.disableComponent(DocumentNodeStoreService.class.getName()); - log.error("handleLeaseFailure: stopped DocumentNodeStoreService"); - // plan B succeeded. - } - } - }). - setPrefetchExternalChanges(config.prefetchExternalChanges()). - setUpdateLimit(config.updateLimit()). - setJournalGCMaxAge(config.journalGCMaxAge()). - setNodeCachePredicate(createCachePredicate()); - - if (!Strings.isNullOrEmpty(persistentCache)) { - mkBuilder.setPersistentCache(persistentCache); - } - if (!Strings.isNullOrEmpty(journalCache)) { - mkBuilder.setJournalCache(journalCache); - } - - boolean wrappingCustomBlobStore = customBlobStore && blobStore instanceof BlobStoreWrapper; - - //Set blobstore before setting the DB - if (customBlobStore && !wrappingCustomBlobStore) { - checkNotNull(blobStore, "Use of custom BlobStore enabled via [%s] but blobStore reference not " + - "initialized", CUSTOM_BLOB_STORE); - mkBuilder.setBlobStore(blobStore); - } - + DocumentNodeStoreBuilder mkBuilder; if (documentStoreType == DocumentStoreType.RDB) { + RDBDocumentNodeStoreBuilder builder = newRDBDocumentNodeStoreBuilder(); + configureBuilder(builder); checkNotNull(dataSource, "DataStore type set [%s] but DataSource reference not initialized", PROP_DS_TYPE); if (!customBlobStore) { checkNotNull(blobDataSource, "DataStore type set [%s] but BlobDataSource reference not initialized", PROP_DS_TYPE); - mkBuilder.setRDBConnection(dataSource, blobDataSource); + builder.setRDBConnection(dataSource, blobDataSource); log.info("Connected to datasources {} {}", dataSource, blobDataSource); } else { if (blobDataSource != null && blobDataSource != dataSource) { log.info("Ignoring blobDataSource {} as custom blob store takes precedence.", blobDataSource); } - mkBuilder.setRDBConnection(dataSource); + builder.setRDBConnection(dataSource); log.info("Connected to datasource {}", dataSource); } + mkBuilder = builder; } else { String uri = config.mongouri(); String db = config.db(); @@ -325,6 +275,8 @@ boolean soKeepAlive = config.socketKeepAlive(); MongoClientURI mongoURI = new MongoClientURI(uri); + String persistentCache = resolvePath(config.persistentCache(), DEFAULT_PERSISTENT_CACHE); + String journalCache = resolvePath(config.journalCache(), DEFAULT_JOURNAL_CACHE); if (log.isInfoEnabled()) { // Take care around not logging the uri directly as it @@ -336,9 +288,12 @@ log.info("Mongo Connection details {}", MongoConnection.toString(mongoURI.getOptions())); } - mkBuilder.setMaxReplicationLag(config.maxReplicationLagInSecs(), TimeUnit.SECONDS); - mkBuilder.setSocketKeepAlive(soKeepAlive); - mkBuilder.setMongoDB(uri, db, config.blobCacheSize()); + MongoDocumentNodeStoreBuilder builder = 
newMongoDocumentNodeStoreBuilder(); + configureBuilder(builder); + builder.setMaxReplicationLag(config.maxReplicationLagInSecs(), TimeUnit.SECONDS); + builder.setSocketKeepAlive(soKeepAlive); + builder.setMongoDB(uri, db, config.blobCacheSize()); + mkBuilder = builder; log.info("Connected to database '{}'", db); } @@ -351,7 +306,7 @@ } //Set wrapping blob store after setting the DB - if (wrappingCustomBlobStore) { + if (isWrappingCustomBlobStore()) { ((BlobStoreWrapper) blobStore).setBlobStore(mkBuilder.getBlobStore()); mkBuilder.setBlobStore(blobStore); } @@ -373,7 +328,7 @@ mkBuilder.setGCMonitor(new DelegatingGCMonitor( newArrayList(gcMonitor, loggingGCMonitor))); - nodeStore = mkBuilder.getNodeStore(); + nodeStore = mkBuilder.build(); // ensure a clusterId is initialized // and expose it as 'oak.clusterid' repository descriptor @@ -463,6 +418,66 @@ nodeStore, props); } + private void configureBuilder(DocumentNodeStoreBuilder builder) { + String persistentCache = resolvePath(config.persistentCache(), DEFAULT_PERSISTENT_CACHE); + String journalCache = resolvePath(config.journalCache(), DEFAULT_JOURNAL_CACHE); + builder.setStatisticsProvider(statisticsProvider). + memoryCacheSize(config.cache() * MB). + memoryCacheDistribution( + config.nodeCachePercentage(), + config.prevDocCachePercentage(), + config.childrenCachePercentage(), + config.diffCachePercentage()). + setCacheSegmentCount(config.cacheSegmentCount()). + setCacheStackMoveDistance(config.cacheStackMoveDistance()). + setBundlingDisabled(config.bundlingDisabled()). + setJournalPropertyHandlerFactory(journalPropertyHandlerFactory). + setLeaseCheck(!ClusterNodeInfo.DEFAULT_LEASE_CHECK_DISABLED /* OAK-2739: enabled by default */). + setLeaseFailureHandler(new LeaseFailureHandler() { + + @Override + public void handleLeaseFailure() { + try { + // plan A: try stopping oak-core + log.error("handleLeaseFailure: stopping oak-core..."); + Bundle bundle = context.getBundleContext().getBundle(); + bundle.stop(Bundle.STOP_TRANSIENT); + log.error("handleLeaseFailure: stopped oak-core."); + // plan A worked, perfect! + } catch (BundleException e) { + log.error("handleLeaseFailure: exception while stopping oak-core: "+e, e); + // plan B: stop only DocumentNodeStoreService (to stop the background threads) + log.error("handleLeaseFailure: stopping DocumentNodeStoreService..."); + context.disableComponent(DocumentNodeStoreService.class.getName()); + log.error("handleLeaseFailure: stopped DocumentNodeStoreService"); + // plan B succeeded. + } + } + }). + setPrefetchExternalChanges(config.prefetchExternalChanges()). + setUpdateLimit(config.updateLimit()). + setJournalGCMaxAge(config.journalGCMaxAge()). 
+ setNodeCachePredicate(createCachePredicate()); + + if (!Strings.isNullOrEmpty(persistentCache)) { + builder.setPersistentCache(persistentCache); + } + if (!Strings.isNullOrEmpty(journalCache)) { + builder.setJournalCache(journalCache); + } + + //Set blobstore before setting the document store + if (customBlobStore && !isWrappingCustomBlobStore()) { + checkNotNull(blobStore, "Use of custom BlobStore enabled via [%s] but blobStore reference not " + + "initialized", CUSTOM_BLOB_STORE); + builder.setBlobStore(blobStore); + } + } + + private boolean isWrappingCustomBlobStore() { + return customBlobStore && blobStore instanceof BlobStoreWrapper; + } + private Predicate createCachePredicate() { if (config.persistentCacheIncludes().length == 0) { return Predicates.alwaysTrue(); @@ -671,7 +686,7 @@ } } - private void registerJMXBeans(final DocumentNodeStore store, DocumentMK.Builder mkBuilder) throws + private void registerJMXBeans(final DocumentNodeStore store, DocumentNodeStoreBuilder mkBuilder) throws IOException { registerCacheStatsMBean(store.getNodeCacheStats()); registerCacheStatsMBean(store.getNodeChildrenCacheStats()); Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCache.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCache.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/LocalDiffCache.java (working copy) @@ -49,7 +49,7 @@ private final Cache diffCache; private final CacheStats diffCacheStats; - LocalDiffCache(DocumentMK.Builder builder) { + LocalDiffCache(DocumentNodeStoreBuilder builder) { this.diffCache = builder.buildLocalDiffCache(); this.diffCacheStats = new CacheStats(diffCache, "Document-LocalDiff", Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCache.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCache.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/MemoryDiffCache.java (working copy) @@ -59,7 +59,7 @@ protected final CacheStats diffCacheStats; - protected MemoryDiffCache(DocumentMK.Builder builder) { + protected MemoryDiffCache(DocumentNodeStoreBuilder builder) { diffCache = builder.buildMemoryDiffCache(); diffCacheStats = new CacheStats(diffCache, "Document-MemoryDiff", builder.getWeigher(), builder.getMemoryDiffCacheSize()); Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCache.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCache.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/TieredDiffCache.java (working copy) @@ -32,7 +32,7 @@ private final DiffCache localCache; private final DiffCache memoryCache; - TieredDiffCache(DocumentMK.Builder builder) { + TieredDiffCache(DocumentNodeStoreBuilder builder) { this.localCache = new LocalDiffCache(builder); this.memoryCache = new MemoryDiffCache(builder); } Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilder.java =================================================================== --- 
oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilder.java (nonexistent) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilder.java (working copy) @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document.mongo; + +import java.net.UnknownHostException; +import java.util.concurrent.TimeUnit; + +import javax.annotation.Nonnull; + +import com.mongodb.DB; +import com.mongodb.MongoClientOptions; +import com.mongodb.ReadConcernLevel; + +import org.apache.jackrabbit.oak.plugins.blob.ReferencedBlob; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder; +import org.apache.jackrabbit.oak.plugins.document.DocumentStore; +import org.apache.jackrabbit.oak.plugins.document.MissingLastRevSeeker; +import org.apache.jackrabbit.oak.plugins.document.VersionGCSupport; +import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection; +import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static com.google.common.base.Suppliers.memoize; +import static org.apache.jackrabbit.oak.plugins.document.util.MongoConnection.readConcernLevel; + +/** + * A builder for a {@link DocumentNodeStore} backed by MongoDB. + */ +public class MongoDocumentNodeStoreBuilder<T extends MongoDocumentNodeStoreBuilder<T>> + extends DocumentNodeStoreBuilder<T> { + + private static final Logger LOG = LoggerFactory.getLogger(MongoDocumentNodeStoreBuilder.class); + + private String mongoUri; + private boolean socketKeepAlive; + private MongoStatus mongoStatus; + private long maxReplicationLagMillis = TimeUnit.HOURS.toMillis(6); + + /** + * @return a new {@link MongoDocumentNodeStoreBuilder}. + */ + public static MongoDocumentNodeStoreBuilder newMongoDocumentNodeStoreBuilder() { + return new MongoDocumentNodeStoreBuilder(); + } + + @SuppressWarnings("unchecked") + @Override + protected T thisBuilder() { + return (T) this; + } + + /** + * Uses the given information to connect to MongoDB as backend + * storage for the DocumentNodeStore. The write concern is either + * taken from the URI or determined automatically based on the MongoDB + * setup. When running on a replica set without explicit write concern + * in the URI, the write concern will be {@code MAJORITY}, otherwise + * {@code ACKNOWLEDGED}. + * + * @param uri a MongoDB URI. + * @param name the name of the database to connect to. This overrides + * any database name given in the {@code uri}. + * @param blobCacheSizeMB the blob cache size in MB.
+ * @return this + * @throws UnknownHostException if one of the hosts given in the URI + * is unknown. + */ + public T setMongoDB(@Nonnull String uri, + @Nonnull String name, + int blobCacheSizeMB) + throws UnknownHostException { + this.mongoUri = uri; + + MongoClientOptions.Builder options = MongoConnection.getDefaultBuilder(); + options.socketKeepAlive(socketKeepAlive); + DB db = new MongoConnection(uri, options).getDB(name); + MongoStatus status = new MongoStatus(db); + if (!MongoConnection.hasWriteConcern(uri)) { + db.setWriteConcern(MongoConnection.getDefaultWriteConcern(db)); + } + if (status.isMajorityReadConcernSupported() && status.isMajorityReadConcernEnabled() && !MongoConnection.hasReadConcern(uri)) { + db.setReadConcern(MongoConnection.getDefaultReadConcern(db)); + } + setMongoDB(db, status, blobCacheSizeMB); + return thisBuilder(); + } + + /** + * Use the given MongoDB as backend storage for the DocumentNodeStore. + * + * @param db the MongoDB connection + * @return this + */ + public T setMongoDB(@Nonnull DB db, + int blobCacheSizeMB) { + return setMongoDB(db, new MongoStatus(db), blobCacheSizeMB); + } + + /** + * Use the given MongoDB as backend storage for the DocumentNodeStore. + * + * @param db the MongoDB connection + * @return this + */ + public T setMongoDB(@Nonnull DB db) { + return setMongoDB(db, 16); + } + + /** + * Enables the socket keep-alive option for MongoDB. The default is + * disabled. + * + * @param enable whether to enable it. + * @return this + */ + public T setSocketKeepAlive(boolean enable) { + this.socketKeepAlive = enable; + return thisBuilder(); + } + + public T setMaxReplicationLag(long duration, TimeUnit unit){ + maxReplicationLagMillis = unit.toMillis(duration); + return thisBuilder(); + } + + public VersionGCSupport createVersionGCSupport() { + DocumentStore store = getDocumentStore(); + if (store instanceof MongoDocumentStore) { + return new MongoVersionGCSupport((MongoDocumentStore) store); + } else { + return super.createVersionGCSupport(); + } + } + + public Iterable createReferencedBlobs(DocumentNodeStore ns) { + final DocumentStore store = getDocumentStore(); + if (store instanceof MongoDocumentStore) { + return () -> new MongoBlobReferenceIterator(ns, (MongoDocumentStore) store); + } else { + return super.createReferencedBlobs(ns); + } + } + + public MissingLastRevSeeker createMissingLastRevSeeker() { + final DocumentStore store = getDocumentStore(); + if (store instanceof MongoDocumentStore) { + return new MongoMissingLastRevSeeker((MongoDocumentStore) store, getClock()); + } else { + return super.createMissingLastRevSeeker(); + } + } + + /** + * Returns the Mongo URI used in the {@link #setMongoDB(String, String, int)} method. + * + * @return the Mongo URI or null if the {@link #setMongoDB(String, String, int)} method hasn't + * been called. + */ + String getMongoUri() { + return mongoUri; + } + + /** + * Returns the status of the Mongo server configured in the {@link #setMongoDB(String, String, int)} method. + * + * @return the status or null if the {@link #setMongoDB(String, String, int)} method hasn't + * been called. 
+ */ + MongoStatus getMongoStatus() { + return mongoStatus; + } + + long getMaxReplicationLagMillis() { + return maxReplicationLagMillis; + } + + private T setMongoDB(@Nonnull DB db, + MongoStatus status, + int blobCacheSizeMB) { + if (!MongoConnection.hasSufficientWriteConcern(db)) { + LOG.warn("Insufficient write concern: " + db.getWriteConcern() + + " At least " + MongoConnection.getDefaultWriteConcern(db) + " is recommended."); + } + if (status.isMajorityReadConcernSupported() && !status.isMajorityReadConcernEnabled()) { + LOG.warn("The read concern should be enabled on mongod using --enableMajorityReadConcern"); + } else if (status.isMajorityReadConcernSupported() && !MongoConnection.hasSufficientReadConcern(db)) { + ReadConcernLevel currentLevel = readConcernLevel(db.getReadConcern()); + ReadConcernLevel recommendedLevel = readConcernLevel(MongoConnection.getDefaultReadConcern(db)); + if (currentLevel == null) { + LOG.warn("Read concern hasn't been set. At least " + recommendedLevel + " is recommended."); + } else { + LOG.warn("Insufficient read concern: " + currentLevel + ". At least " + recommendedLevel + " is recommended."); + } + } + + this.mongoStatus = status; + this.documentStoreSupplier = memoize(() -> new MongoDocumentStore( + db, MongoDocumentNodeStoreBuilder.this)); + + if (this.blobStore == null) { + GarbageCollectableBlobStore s = new MongoBlobStore(db, blobCacheSizeMB * 1024 * 1024L); + setGCBlobStore(s); + } + return thisBuilder(); + } +} Property changes on: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentNodeStoreBuilder.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java (working copy) @@ -53,7 +53,6 @@ import org.apache.jackrabbit.oak.cache.CacheValue; import org.apache.jackrabbit.oak.plugins.document.Collection; import org.apache.jackrabbit.oak.plugins.document.Document; -import org.apache.jackrabbit.oak.plugins.document.DocumentMK; import org.apache.jackrabbit.oak.plugins.document.DocumentStore; import org.apache.jackrabbit.oak.plugins.document.DocumentStoreException; import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsCollector; @@ -230,7 +229,7 @@ private static final Key KEY_MODIFIED = new Key(MODIFIED_IN_SECS, null); - public MongoDocumentStore(DB db, DocumentMK.Builder builder) { + public MongoDocumentStore(DB db, MongoDocumentNodeStoreBuilder builder) { MongoStatus mongoStatus = builder.getMongoStatus(); if (mongoStatus == null) { mongoStatus = new MongoStatus(db); Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentNodeStoreBuilder.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentNodeStoreBuilder.java (nonexistent) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentNodeStoreBuilder.java (working copy) @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.jackrabbit.oak.plugins.document.rdb; + +import javax.sql.DataSource; + +import org.apache.jackrabbit.oak.plugins.blob.ReferencedBlob; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder; +import org.apache.jackrabbit.oak.plugins.document.DocumentStore; +import org.apache.jackrabbit.oak.plugins.document.VersionGCSupport; +import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore; + +import static com.google.common.base.Suppliers.ofInstance; + +/** + * A builder for a {@link DocumentNodeStore} backed by a relational database. + */ +public class RDBDocumentNodeStoreBuilder> + extends DocumentNodeStoreBuilder { + + /** + * @return a new {@link RDBDocumentNodeStoreBuilder}. + */ + public static RDBDocumentNodeStoreBuilder newRDBDocumentNodeStoreBuilder() { + return new RDBDocumentNodeStoreBuilder(); + } + + @SuppressWarnings("unchecked") + @Override + protected T thisBuilder() { + return (T) this; + } + + /** + * Sets a {@link DataSource} to use for the RDB document and blob + * stores. + * + * @return this + */ + public T setRDBConnection(DataSource ds) { + setRDBConnection(ds, new RDBOptions()); + return thisBuilder(); + } + + /** + * Sets a {@link DataSource} to use for the RDB document and blob + * stores, including {@link RDBOptions}. + * + * @return this + */ + public T setRDBConnection(DataSource ds, RDBOptions options) { + this.documentStoreSupplier = ofInstance(new RDBDocumentStore(ds, this, options)); + if(blobStore == null) { + GarbageCollectableBlobStore s = new RDBBlobStore(ds, options); + setGCBlobStore(s); + } + return thisBuilder(); + } + + /** + * Sets a {@link DataSource}s to use for the RDB document and blob + * stores. 
+ * + * @return this + */ + public T setRDBConnection(DataSource documentStoreDataSource, DataSource blobStoreDataSource) { + this.documentStoreSupplier = ofInstance(new RDBDocumentStore(documentStoreDataSource, this)); + if(blobStore == null) { + GarbageCollectableBlobStore s = new RDBBlobStore(blobStoreDataSource); + setGCBlobStore(s); + } + return thisBuilder(); + } + + public VersionGCSupport createVersionGCSupport() { + DocumentStore store = getDocumentStore(); + if (store instanceof RDBDocumentStore) { + return new RDBVersionGCSupport((RDBDocumentStore) store); + } else { + return super.createVersionGCSupport(); + } + } + + public Iterable createReferencedBlobs(DocumentNodeStore ns) { + final DocumentStore store = getDocumentStore(); + if (store instanceof RDBDocumentStore) { + return () -> new RDBBlobReferenceIterator(ns, (RDBDocumentStore) store); + } else { + return super.createReferencedBlobs(ns); + } + } +} Property changes on: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentNodeStoreBuilder.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java =================================================================== --- oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (revision 1817088) +++ oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (working copy) @@ -66,7 +66,7 @@ import org.apache.jackrabbit.oak.cache.CacheValue; import org.apache.jackrabbit.oak.plugins.document.Collection; import org.apache.jackrabbit.oak.plugins.document.Document; -import org.apache.jackrabbit.oak.plugins.document.DocumentMK; +import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreBuilder; import org.apache.jackrabbit.oak.plugins.document.DocumentStore; import org.apache.jackrabbit.oak.plugins.document.DocumentStoreException; import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsCollector; @@ -233,9 +233,9 @@ /** * Creates a {@linkplain RDBDocumentStore} instance using the provided - * {@link DataSource}, {@link DocumentMK.Builder}, and {@link RDBOptions}. + * {@link DataSource}, {@link DocumentNodeStoreBuilder}, and {@link RDBOptions}. */ - public RDBDocumentStore(DataSource ds, DocumentMK.Builder builder, RDBOptions options) { + public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder builder, RDBOptions options) { try { initialize(ds, builder, options); } catch (Exception ex) { @@ -245,10 +245,10 @@ /** * Creates a {@linkplain RDBDocumentStore} instance using the provided - * {@link DataSource}, {@link DocumentMK.Builder}, and default + * {@link DataSource}, {@link DocumentNodeStoreBuilder}, and default * {@link RDBOptions}. 
*/ - public RDBDocumentStore(DataSource ds, DocumentMK.Builder builder) { + public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder builder) { this(ds, builder, new RDBOptions()); } @@ -822,7 +822,7 @@ private final RDBDocumentSerializer ser = new RDBDocumentSerializer(this); - private void initialize(DataSource ds, DocumentMK.Builder builder, RDBOptions options) throws Exception { + private void initialize(DataSource ds, DocumentNodeStoreBuilder builder, RDBOptions options) throws Exception { this.stats = builder.getDocumentStoreStatsCollector(); this.tableMeta.put(Collection.NODES, new RDBTableMetaData(createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.NODES))));
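
For illustration only, the following minimal sketch shows how a DocumentNodeStore would be constructed with the builders introduced in this change instead of the removed DocumentMK.Builder. It uses only methods visible in the hunks above (newMongoDocumentNodeStoreBuilder, setSocketKeepAlive, setMaxReplicationLag, setMongoDB, newRDBDocumentNodeStoreBuilder, setRDBConnection, build) and assumes that build() returns a DocumentNodeStore, as it is used in DocumentNodeStoreService.registerNodeStore(). The Mongo URI, database name, blob cache size, and DataSource below are placeholder values, not values taken from this patch.

    // Usage sketch for the refactored builder API (not part of the patch).
    import java.net.UnknownHostException;
    import java.util.concurrent.TimeUnit;
    import javax.sql.DataSource;
    import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
    import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder;
    import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder;
    import static org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentNodeStoreBuilder.newMongoDocumentNodeStoreBuilder;
    import static org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentNodeStoreBuilder.newRDBDocumentNodeStoreBuilder;

    public class DocumentNodeStoreBuilderSketch {

        // MongoDB-backed store. URI, database name and blob cache size are
        // placeholders; keep-alive is set before setMongoDB, mirroring the
        // order used in DocumentNodeStoreService.
        static DocumentNodeStore mongoExample() throws UnknownHostException {
            MongoDocumentNodeStoreBuilder builder = newMongoDocumentNodeStoreBuilder();
            builder.setSocketKeepAlive(true);
            builder.setMaxReplicationLag(6, TimeUnit.HOURS); // optional; 6 hours is also the builder default
            builder.setMongoDB("mongodb://localhost:27017", "oak", 16);
            return builder.build(); // replaces DocumentMK.Builder#getNodeStore()
        }

        // RDB-backed store. A single DataSource serves both the document and
        // the blob store, as in setRDBConnection(DataSource) above.
        static DocumentNodeStore rdbExample(DataSource ds) {
            RDBDocumentNodeStoreBuilder builder = newRDBDocumentNodeStoreBuilder();
            builder.setRDBConnection(ds);
            return builder.build();
        }
    }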