diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/atomic/AtomicCounterEditor.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/atomic/AtomicCounterEditor.java
index a7f9343c3a..84ec55393d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/atomic/AtomicCounterEditor.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/atomic/AtomicCounterEditor.java
@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
* property ({@link #PROP_COUNTER}) in an atomic way. This will represent an increment or decrement
* of a counter in the case, for example, of Likes or Voting.
*
- *
+ *
*
* Whenever you add a {@link NodeTypeConstants#MIX_ATOMIC_COUNTER} mixin to a node it will turn it
* into an atomic counter. Then in order to increment or decrement the {@code oak:counter} property
@@ -68,59 +68,59 @@ import org.slf4j.LoggerFactory;
 * {@code oak:increment} will never be saved, only the {@code oak:counter} will
* be amended accordingly.
*
- *
+ *
*
* So in order to deal with the counter from a JCR point of view you'll do something as follows
*
- *
+ *
*
* Session session = ...
- *
+ *
* // creating a counter node
* Node counter = session.getRootNode().addNode("mycounter");
* counter.addMixin("mix:atomicCounter"); // or use the NodeTypeConstants
* session.save();
- *
+ *
 * // Will output 0, the default value
* System.out.println("counter now: " + counter.getProperty("oak:counter").getLong());
- *
+ *
* // incrementing by 5 the counter
* counter.setProperty("oak:increment", 5);
* session.save();
- *
+ *
* // Will output 5
* System.out.println("counter now: " + counter.getProperty("oak:counter").getLong());
- *
+ *
* // decreasing by 1
* counter.setProperty("oak:increment", -1);
* session.save();
- *
+ *
* // Will output 4
* System.out.println("counter now: " + counter.getProperty("oak:counter").getLong());
- *
+ *
* session.logout();
*
- *
- * Internal behavioural details
- *
+ *
+ * Internal behavioural details
+ *
*
* The related jira ticket is OAK-2472.
 * In a nutshell, when you save an {@code oak:increment} behind the scenes it takes its value and
 * increments an internal counter. There will be an individual counter for each cluster node.
*
- *
+ *
*
* Then it will consolidate all the internal counters into a single one: {@code oak:counter}. The
* consolidation process can happen either synchronously or asynchronously. Refer to
* {@link #AtomicCounterEditor(NodeBuilder, String, ScheduledExecutorService, NodeStore, Whiteboard)}
 * for details on when it consolidates one way or the other.
*
- *
+ *
*
 * synchronous. It means the consolidation, the sum of all the internal counters, will
 * happen in the same thread, during the lifecycle of the same commit.
*
- *
+ *
*
* asynchronous. It means the internal counters will be set during the same commit;
 * but it will eventually schedule a separate thread which will retry a few times to consolidate
@@ -132,22 +132,22 @@ public class AtomicCounterEditor extends DefaultEditor {
* property to be set for incrementing/decrementing the counter
*/
public static final String PROP_INCREMENT = "oak:increment";
-
+
/**
* property with the consolidated counter
*/
public static final String PROP_COUNTER = "oak:counter";
-
+
/**
* prefix used internally for tracking the counting requests
*/
public static final String PREFIX_PROP_COUNTER = ":oak-counter-";
-
+
/**
* prefix used internally for tracking the cluster node related revision numbers
*/
public static final String PREFIX_PROP_REVISION = ":rev-";
-
+
private static final Logger LOG = LoggerFactory.getLogger(AtomicCounterEditor.class);
private final NodeBuilder builder;
private final String path;
@@ -155,22 +155,22 @@ public class AtomicCounterEditor extends DefaultEditor {
private final ScheduledExecutorService executor;
private final NodeStore store;
private final Whiteboard board;
-
+
/**
* the current counter property name
*/
private final String counterName;
-
+
/**
* the current revision property name
*/
private final String revisionName;
-
+
/**
 * instructs whether to update the node on leave.
*/
private boolean update;
-
+
/**
*
 * Create an instance of the editor for atomic increments. It can work synchronously as well as
@@ -182,14 +182,14 @@ public class AtomicCounterEditor extends DefaultEditor {
* be found in the whiteboard, a {@link EmptyHook} will be provided to the {@link NodeStore} for
* merging.
*
- *
+ *
* @param builder the build on which to work. Cannot be null.
 * @param instanceId the current Oak instance Id. If null the editor will be synchronous.
 * @param executor the current Oak executor service. If null the editor will be synchronous.
* @param store the current Oak node store. If null the editor will be synchronous.
* @param board the current Oak {@link Whiteboard}.
*/
- public AtomicCounterEditor(@NotNull final NodeBuilder builder,
+ public AtomicCounterEditor(@NotNull final NodeBuilder builder,
@Nullable String instanceId,
@Nullable ScheduledExecutorService executor,
@Nullable NodeStore store,
@@ -197,9 +197,9 @@ public class AtomicCounterEditor extends DefaultEditor {
this("", checkNotNull(builder), instanceId, executor, store, board);
}
- private AtomicCounterEditor(final String path,
- final NodeBuilder builder,
- @Nullable String instanceId,
+ private AtomicCounterEditor(final String path,
+ final NodeBuilder builder,
+ @Nullable String instanceId,
@Nullable ScheduledExecutorService executor,
@Nullable NodeStore store,
@Nullable Whiteboard board) {
@@ -209,8 +209,8 @@ public class AtomicCounterEditor extends DefaultEditor {
this.executor = executor;
this.store = store;
this.board = board;
-
- counterName = instanceId == null ? PREFIX_PROP_COUNTER :
+
+ counterName = instanceId == null ? PREFIX_PROP_COUNTER :
PREFIX_PROP_COUNTER + instanceId;
revisionName = instanceId == null ? PREFIX_PROP_REVISION :
PREFIX_PROP_REVISION + instanceId;
@@ -234,20 +234,20 @@ public class AtomicCounterEditor extends DefaultEditor {
}
return process;
}
-
+
/**
*
* consolidate the {@link #PREFIX_PROP_COUNTER} properties and sum them into the
* {@link #PROP_COUNTER}
*
- *
+ *
*
* The passed in {@code NodeBuilder} must have
* {@link org.apache.jackrabbit.JcrConstants#JCR_MIXINTYPES JCR_MIXINTYPES} with
* {@link NodeTypeConstants#MIX_ATOMIC_COUNTER MIX_ATOMIC_COUNTER}.
* If not it will be silently ignored.
*
- *
+ *
* @param builder the builder to work on. Cannot be null.
*/
public static void consolidateCount(@NotNull final NodeBuilder builder) {
@@ -263,27 +263,27 @@ public class AtomicCounterEditor extends DefaultEditor {
private void setUniqueCounter(final long value) {
update = true;
-
+
PropertyState counter = builder.getProperty(counterName);
PropertyState revision = builder.getProperty(revisionName);
-
+
long currentValue = 0;
if (counter != null) {
currentValue = counter.getValue(LONG);
}
-
+
long currentRevision = 0;
if (revision != null) {
currentRevision = revision.getValue(LONG);
}
-
+
currentValue += value;
currentRevision += 1;
builder.setProperty(counterName, currentValue, LONG);
builder.setProperty(revisionName, currentRevision, LONG);
}
-
+
@Override
public void propertyAdded(final PropertyState after) throws CommitFailedException {
if (shallWeProcessProperty(after, path, builder)) {
@@ -299,8 +299,8 @@ public class AtomicCounterEditor extends DefaultEditor {
}
@Override
- public Editor childNodeChanged(final String name,
- final NodeState before,
+ public Editor childNodeChanged(final String name,
+ final NodeState before,
final NodeState after) throws CommitFailedException {
return new AtomicCounterEditor(path + '/' + name, builder.getChildNode(name), instanceId,
executor, store, board);
@@ -314,7 +314,7 @@ public class AtomicCounterEditor extends DefaultEditor {
"Executing synchronously. instanceId: {}, store: {}, executor: {}, board: {}",
new Object[] { instanceId, store, executor, board });
consolidateCount(builder);
- } else {
+ } else {
CommitHook hook = WhiteboardUtils.getService(board, CommitHook.class);
if (hook == null) {
LOG.trace("CommitHook not registered with Whiteboard. Falling back to sync.");
@@ -322,31 +322,31 @@ public class AtomicCounterEditor extends DefaultEditor {
} else {
long delay = 500;
ConsolidatorTask t = new ConsolidatorTask(
- path,
- builder.getProperty(revisionName),
- store,
- executor,
- delay,
+ path,
+ builder.getProperty(revisionName),
+ store,
+ executor,
+ delay,
hook);
- LOG.debug("[{}] Scheduling process by {}ms", t.getName(), delay);
- executor.schedule(t, delay, TimeUnit.MILLISECONDS);
+ LOG.debug("[{}] Scheduling process by {}ms", t.getName(), delay);
+ executor.schedule(t, delay, TimeUnit.MILLISECONDS);
}
}
}
}
-
+
public static class ConsolidatorTask implements Callable {
/**
* millis over which the task will timeout
*/
public static final long MAX_TIMEOUT = Long
.getLong("oak.atomiccounter.task.timeout", 32000);
-
+
/**
- * millis below which the next delay will be scheduled at this amount.
+ * millis below which the next delay will schedule at this amount.
*/
public static final long MIN_TIMEOUT = 500;
-
+
private final String name;
private final String p;
private final PropertyState rev;
@@ -355,9 +355,9 @@ public class AtomicCounterEditor extends DefaultEditor {
private final long delay;
private final long start;
private final CommitHook hook;
-
- public ConsolidatorTask(@NotNull String path,
- @Nullable PropertyState revision,
+
+ public ConsolidatorTask(@NotNull String path,
+ @Nullable PropertyState revision,
@NotNull NodeStore store,
@NotNull ScheduledExecutorService exec,
long delay,
@@ -383,23 +383,23 @@ public class AtomicCounterEditor extends DefaultEditor {
this.name = task.name;
this.start = task.start;
}
-
+
@Override
- public Void call() throws Exception {
+ public Void call() throws Exception {
try {
LOG.debug("[{}] Async consolidation running: path: {}, revision: {}", name, p, rev);
NodeBuilder root = s.getRoot().builder();
NodeBuilder b = builderFromPath(root, p);
-
+
dumpNode(b, p);
-
+
if (!b.exists()) {
LOG.debug("[{}] Builder for '{}' from NodeStore not available. Rescheduling.",
name, p);
reschedule();
return null;
}
-
+
if (!checkRevision(b, rev)) {
LOG.debug("[{}] Missing or not yet a valid revision for '{}'. Rescheduling.",
name, p);
@@ -423,12 +423,12 @@ public class AtomicCounterEditor extends DefaultEditor {
reschedule();
return null;
}
-
+
LOG.debug("[{}] Consolidation for '{}', '{}' completed in {}ms", name, p, rev,
System.currentTimeMillis() - start);
return null;
}
-
+
private void dumpNode(@NotNull NodeBuilder b, String path) {
if (LOG.isTraceEnabled()) {
checkNotNull(b);
@@ -439,7 +439,7 @@ public class AtomicCounterEditor extends DefaultEditor {
LOG.trace("[{}] Node status for {}:\n{}", this.name, path, s);
}
}
-
+
private void reschedule() {
long d = nextDelay(delay);
if (isTimedOut(d)) {
@@ -447,12 +447,12 @@ public class AtomicCounterEditor extends DefaultEditor {
name, p);
return;
}
-
+
ConsolidatorTask task = new ConsolidatorTask(this, d);
LOG.debug("[{}] Rescheduling '{}' by {}ms", task.getName(), p, d);
exec.schedule(task, d, TimeUnit.MILLISECONDS);
}
-
+
public static long nextDelay(long currentDelay) {
if (currentDelay < MIN_TIMEOUT) {
return MIN_TIMEOUT;
@@ -462,24 +462,24 @@ public class AtomicCounterEditor extends DefaultEditor {
}
return currentDelay * 2;
}
-
+
public static boolean isTimedOut(long delay) {
return delay > MAX_TIMEOUT;
}
-
+
public String getName() {
return name;
}
}
-
+
/**
 * checks that the revision provided in the PropertyState is less than or equal to the one within
* the builder.
- *
+ *
 * If {@code revision} is null it will always return {@code true}.
- *
+ *
* If {@code builder} does not contain the property it will always return false.
- *
+ *
 * @param builder the builder expected to hold the current revision property
 * @param revision the revision to compare with; may be null
 * @return true if the builder's revision is equal to or newer than the provided one
@@ -493,16 +493,16 @@ public class AtomicCounterEditor extends DefaultEditor {
if (builderRev == null) {
return false;
}
-
+
long brValue = builderRev.getValue(Type.LONG);
long rValue = revision.getValue(Type.LONG);
-
+
if (brValue >= rValue) {
return true;
}
return false;
}
-
+
private static NodeBuilder builderFromPath(@NotNull NodeBuilder ancestor, @NotNull String path) {
NodeBuilder b = checkNotNull(ancestor);
for (String name : PathUtils.elements(checkNotNull(path))) {
@@ -510,12 +510,12 @@ public class AtomicCounterEditor extends DefaultEditor {
}
return b;
}
-
+
/**
 * check whether the provided builder has to be consolidated or not. A node has to be
 * consolidated if the sum of all the hidden counters does not match the exposed one. It could
 * happen that some other node previously saw our change and already consolidated it.
- *
+ *
 * @param b the builder to check. Cannot be null.
* @return true if the sum of the hidden counters does not match the exposed one.
*/
@@ -525,14 +525,14 @@ public class AtomicCounterEditor extends DefaultEditor {
if (counter == null) {
counter = LongPropertyState.createLongProperty(PROP_COUNTER, 0);
}
-
+
long hiddensum = 0;
for (PropertyState p : b.getProperties()) {
if (p.getName().startsWith(PREFIX_PROP_COUNTER)) {
hiddensum += p.getValue(LONG);
}
}
-
+
return counter.getValue(LONG) != hiddensum;
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/tree/impl/ImmutableTree.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/tree/impl/ImmutableTree.java
index 75f8e3f729..5709be2b73 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/tree/impl/ImmutableTree.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/tree/impl/ImmutableTree.java
@@ -34,7 +34,7 @@ import org.jetbrains.annotations.Nullable;
* Immutable implementation of the {@code Tree} interface in order to provide
 * a much richer API for a given {@code NodeState}.
*
- * Tree hierarchy
+ * Tree hierarchy
* Due to the nature of this {@code Tree} implementation creating a proper
* hierarchical view of the tree structure is the responsibility of the caller.
* It is recommended to start with the state of the
@@ -49,7 +49,7 @@ import org.jetbrains.annotations.Nullable;
* {@link #ImmutableTree(ImmutableTree.ParentProvider, String, org.apache.jackrabbit.oak.spi.state.NodeState)}
 * and specify an appropriate {@code ParentProvider} implementation.
*
- * ParentProvider
+ * ParentProvider
 * Apart from creating the tree hierarchy in traversal mode, this tree implementation
 * allows instantiating disconnected trees that, depending on the use, may
 * never or only on demand retrieve hierarchy information. The following default
@@ -64,13 +64,13 @@ import org.jetbrains.annotations.Nullable;
* upon hierarchy related methods like {@link #getParent()}, {@link #getPath()}
*
*
- * Filtering 'hidden' items
+ * Filtering 'hidden' items
* This {@code Tree} implementation reflects the item hierarchy as exposed by the
* underlying {@code NodeState}. In contrast to the mutable implementations it
* does not filter out 'hidden' items as identified by
* {@code org.apache.jackrabbit.oak.spi.state.NodeStateUtils#isHidden(String)}.
*
- * Equality and hash code
+ * Equality and hash code
* In contrast to {@link org.apache.jackrabbit.oak.plugins.tree.impl.AbstractMutableTree}
* the {@code ImmutableTree} implements
* {@link Object#equals(Object)} and {@link Object#hashCode()}: Two {@code ImmutableTree}s
@@ -111,11 +111,13 @@ public final class ImmutableTree extends AbstractTree implements TreeTypeAware,
}
//----------------------------------------------------------< TypeAware >---
+ @Override
@Nullable
public TreeType getType() {
return type;
}
+ @Override
public void setType(@NotNull TreeType type) {
this.type = type;
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/AuthenticationConfigurationImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/AuthenticationConfigurationImpl.java
index f95466f379..286ead35cb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/AuthenticationConfigurationImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/AuthenticationConfigurationImpl.java
@@ -132,7 +132,7 @@ public class AuthenticationConfigurationImpl extends ConfigurationBase implement
* related validation is omitted
*
*
- * Configuration Options
+ * Configuration Options
*
* - {@link #PARAM_APP_NAME}: The appName passed to
* {@code Configuration#getAppConfigurationEntry(String)}. The default
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
index fccb013f53..2c393bfa5c 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
@@ -55,7 +55,7 @@ import java.util.stream.Stream;
* associated with user are retrieved from a configurable
* {@link org.apache.jackrabbit.oak.spi.security.principal.PrincipalProvider}.
*
 - * Credentials
+ * Credentials
*
* The {@code Credentials} are collected during {@link #login()} using the
* following logic:
@@ -82,12 +82,12 @@ import java.util.stream.Stream;
* The {@link Credentials} obtained during the {@code #login()} are added to
* the shared state and - upon successful {@code #commit()} to the {@code Subject}.
*
- * Principals
+ * Principals
* Upon successful login the principals associated with the user are calculated
* (see also {@link AbstractLoginModule#getPrincipals(String)}. These principals
* are finally added to the subject during {@code #commit()}.
*
- * Impersonation
+ * Impersonation
* Impersonation such as defined by {@link javax.jcr.Session#impersonate(javax.jcr.Credentials)}
* is covered by this login module by the means of {@link ImpersonationCredentials}.
* Impersonation will succeed if the {@link ImpersonationCredentials#getBaseCredentials() base credentials}
diff --git a/oak-exercise/src/main/java/org/apache/jackrabbit/oak/exercise/security/authorization/models/readonly/ReadOnlyAuthorizationConfiguration.java b/oak-exercise/src/main/java/org/apache/jackrabbit/oak/exercise/security/authorization/models/readonly/ReadOnlyAuthorizationConfiguration.java
index e501c07b04..7fbf7c463d 100644
--- a/oak-exercise/src/main/java/org/apache/jackrabbit/oak/exercise/security/authorization/models/readonly/ReadOnlyAuthorizationConfiguration.java
+++ b/oak-exercise/src/main/java/org/apache/jackrabbit/oak/exercise/security/authorization/models/readonly/ReadOnlyAuthorizationConfiguration.java
@@ -74,12 +74,12 @@ import java.util.Set;
import static org.apache.jackrabbit.oak.spi.security.RegistrationConstants.OAK_SECURITY_NAME;
/**
- * Read Only Authorization Model
+ * Read Only Authorization Model
*
* This authorization module forms part of the training material provided by the
 * oak-exercise module and must not be used in a production environment!
*
- * Overview
+ * Overview
* This simplistic authorization model is limited to permission evaluation and
* doesn't support access control management.
*
@@ -90,7 +90,7 @@ import static org.apache.jackrabbit.oak.spi.security.RegistrationConstants.OAK_S
* There exists a single exception to that rule: For the internal {@link SystemPrincipal}
 * permission evaluation is not enforced by this module, i.e. this module is skipped.
*
- * Intended Usage
+ * Intended Usage
* This authorization model is intended to be used in 'AND' combination with the
* default authorization setup defined by Oak (and optionally additional models
 * such as oak-authorization-cug).
@@ -98,12 +98,12 @@ import static org.apache.jackrabbit.oak.spi.security.RegistrationConstants.OAK_S
 * It is not intended to be used as a standalone model as it would grant full read
* access to everyone.
*
- * Limitations
+ * Limitations
* Experimental model for training purpose and not intended for usage in production.
*
- * Key Features
+ * Key Features
*
- * Access Control Management
+ * Access Control Management
*
*
*
@@ -117,7 +117,7 @@ import static org.apache.jackrabbit.oak.spi.security.RegistrationConstants.OAK_S
* | Effective Policies by Principals | for every set of principals a single effective policy of type {@link NamedAccessControlPolicy} |
*
*
- * Permission Evaluation
+ * Permission Evaluation
*
*
*
@@ -126,20 +126,20 @@ import static org.apache.jackrabbit.oak.spi.security.RegistrationConstants.OAK_S
* | Aggregated Permission Provider | yes |
*
*
- * Representation in the Repository
+ * Representation in the Repository
*
* There exists no dedicated access control or permission content for this
* authorization model as it doesn't persist any information into the repository.
* {@link SecurityConfiguration#getContext()} therefore returns the {@link Context#DEFAULT default}.
*
- * Configuration
+ * Configuration
*
* This model comes with a single mandatory configurable property:
*
* - configurationRanking : {@link CompositeConfiguration#PARAM_RANKING}, no default value.
*
*
- * Installation Instructions
+ * Installation Instructions
*
* The following steps are required to install this authorization model in an OSGi based Oak setup.
*
diff --git a/oak-security-spi/src/main/java/org/apache/jackrabbit/oak/spi/security/principal/PrincipalConfiguration.java b/oak-security-spi/src/main/java/org/apache/jackrabbit/oak/spi/security/principal/PrincipalConfiguration.java
index c32f4caf72..c94340521c 100644
--- a/oak-security-spi/src/main/java/org/apache/jackrabbit/oak/spi/security/principal/PrincipalConfiguration.java
+++ b/oak-security-spi/src/main/java/org/apache/jackrabbit/oak/spi/security/principal/PrincipalConfiguration.java
@@ -45,8 +45,8 @@ public interface PrincipalConfiguration extends SecurityConfiguration {
/**
* Returns an instance of the OAK {@link PrincipalProvider}.
*
- * Backwards compatibility with Jackrabbit 2.x
- * Configuration of Principal Providers
+ * Backwards compatibility with Jackrabbit 2.x
+ * Configuration of Principal Providers
* In Jackrabbit 2.x the configuration of principal providers was tied to
 * the LoginModule configuration, thus mixing authentication concerns
 * with principal management. Since OAK makes the {@code PrincipalProvider}
@@ -56,7 +56,7 @@ public interface PrincipalConfiguration extends SecurityConfiguration {
* {@link org.apache.jackrabbit.oak.spi.security.SecurityProvider} is
* made available in the {@link org.apache.jackrabbit.oak.spi.security.authentication.AuthenticationConfiguration}.
*
- * Multiple Sources for Principals
+ * Multiple Sources for Principals
* In Jackrabbit 2.x it was possible to configure multiple principal providers.
 * As of OAK there is only a single principal provider implementation
* responsible for a given workspace. If principals originate from different
diff --git a/oak-store-composite/src/main/java/org/apache/jackrabbit/oak/composite/package-info.java b/oak-store-composite/src/main/java/org/apache/jackrabbit/oak/composite/package-info.java
index 4b28a62585..3f39c7541d 100644
--- a/oak-store-composite/src/main/java/org/apache/jackrabbit/oak/composite/package-info.java
+++ b/oak-store-composite/src/main/java/org/apache/jackrabbit/oak/composite/package-info.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
/**
- * Composition support
+ * Composition support
*
* This package contains support classes for implementing a composite persistence at the {@linkplain org.apache.jackrabbit.oak.spi.state.NodeStore} level.
*
@@ -28,7 +28,7 @@
 * - Negligible performance impact. Composition should not add a significant performance overhead.
*
*
- * Implementation
+ * Implementation
*
* The main entry point is the {@link org.apache.jackrabbit.oak.composite.CompositeNodeStore},
* which wraps one or more NodeStore instances. Also of interest are the {@link org.apache.jackrabbit.oak.composite.CompositeNodeState} and {@link org.apache.jackrabbit.oak.composite.CompositeNodeBuilder}.
@@ -41,7 +41,7 @@
 * Using this approach allows us to always keep related NodeStore, NodeState and NodeBuilder
* instances isolated from other instances.
*
 - * Open items
+ * Open items
*
* 1. Brute-force support for oak:mount nodes.
*
diff --git a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
index 15676af7a7..98661b5efe 100755
--- a/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
+++ b/oak-store-document/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
@@ -99,8 +99,8 @@ import com.google.common.collect.Sets;
/**
* Implementation of {@link DocumentStore} for relational databases.
- *
 - * Supported Databases
+ *
+ * Supported Databases
*
* The code is supposed to be sufficiently generic to run with a variety of
* database implementations. However, the tables are created when required to
@@ -115,8 +115,8 @@ import com.google.common.collect.Sets;
 * - Microsoft SQL Server
* - Oracle
*
- *
- * Table Layout
+ *
+ * Table Layout
*
* Data for each of the DocumentStore's {@link Collection}s is stored in its own
* database table (with a name matching the collection).
@@ -203,7 +203,7 @@ import com.google.common.collect.Sets;
* testing, as tables can also be dropped automatically when the store is
* disposed (this only happens for those tables that have been created on
* demand).
 - * Versioning
+ * Versioning
*
* The initial database layout used in OAK 1.0 through 1.6 is version 0.
*
@@ -216,7 +216,7 @@ import com.google.common.collect.Sets;
 * The code deals with version 0, version 1 and version 2 table layouts. By
* default, it tries to create version 2 tables, and also tries to upgrade
* existing version 0 and 1 tables to version 2.
 - * DB-specific information
+ * DB-specific information
*
* Databases need to be configured so that:
*
@@ -228,7 +228,7 @@ import com.google.common.collect.Sets;
* See the
* RDBDocumentStore documentation
* for more information.
- * Table Creation
+ * Table Creation
*
* The code tries to create the tables when they are not present. Likewise, it
* tries to upgrade to a newer schema when needed.
@@ -237,12 +237,12 @@ import com.google.common.collect.Sets;
* create them "manually". The oak-run "rdbddldump"
* command can be used to print out the DDL statements that would have been used for auto-creation
* and/or automatic schema updates.
- *
 - * Caching
+ *
+ * Caching
*
* The cache borrows heavily from the {@link MongoDocumentStore} implementation.
- *
 - * Queries
+ *
+ * Queries
*
* The implementation currently supports only three indexed properties: "_bin",
* "deletedOnce", and "_modified". Attempts to use a different indexed property
@@ -1269,7 +1269,7 @@ public class RDBDocumentStore implements DocumentStore {
}
return result;
}
-
+
private static class IndexInformation {
public Map fields;
public boolean nonunique;
@@ -1473,7 +1473,7 @@ public class RDBDocumentStore implements DocumentStore {
return wasChanged;
}
-
+
private static void getTableMetaData(Connection con, Collection extends Document> col, RDBTableMetaData tmd) throws SQLException {
Statement checkStatement = null;
ResultSet checkResultSet = null;