Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (revision 706270)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MultiIndex.java (working copy)
@@ -35,7 +35,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
-import org.apache.commons.collections.iterators.EmptyIterator;
import javax.jcr.RepositoryException;
import java.io.IOException;
@@ -50,6 +49,7 @@
import java.util.HashMap;
import java.util.Map;
import java.util.Collection;
+import java.util.Collections;
/**
* A MultiIndex consists of a {@link VolatileIndex} and multiple
@@ -406,14 +406,18 @@
* Atomically updates the index by removing some documents and adding
* others.
*
- * @param remove Iterator of UUIDs that identify documents to
+ * @param remove collection of UUIDs that identify documents to
* remove
- * @param add Iterator of Documents to add. Calls to
- * next() on this iterator may return
- * null, to indicate that a node could not be
- * indexed successfully.
+ * @param add collection of Documents to add. Some of the
+ * elements in this collection may be null, to
+ * indicate that a node could not be indexed successfully.
*/
- synchronized void update(Iterator remove, Iterator add) throws IOException {
+ synchronized void update(Collection remove, Collection add) throws IOException {
+ // make sure a reader is available during long updates
+ if (add.size() > handler.getBufferSize()) {
+ getIndexReader().release();
+ }
+
synchronized (updateMonitor) {
updateInProgress = true;
}
@@ -422,11 +426,11 @@
executeAndLog(new Start(transactionId));
boolean flush = false;
- while (remove.hasNext()) {
- executeAndLog(new DeleteNode(transactionId, (UUID) remove.next()));
+ for (Iterator it = remove.iterator(); it.hasNext(); ) {
+ executeAndLog(new DeleteNode(transactionId, (UUID) it.next()));
}
- while (add.hasNext()) {
- Document doc = (Document) add.next();
+ for (Iterator it = add.iterator(); it.hasNext(); ) {
+ Document doc = (Document) it.next();
if (doc != null) {
executeAndLog(new AddNode(transactionId, doc));
// commit volatile index if needed
@@ -456,8 +460,7 @@
* index.
*/
void addDocument(Document doc) throws IOException {
- List add = Arrays.asList(new Document[]{doc});
- update(EmptyIterator.INSTANCE, add.iterator());
+ update(Collections.EMPTY_LIST, Collections.singletonList(doc));
}
/**
@@ -467,8 +470,7 @@
* @throws IOException if an error occurs while deleting the document.
*/
void removeDocument(UUID uuid) throws IOException {
- List remove = Arrays.asList(new UUID[]{uuid});
- update(remove.iterator(), EmptyIterator.INSTANCE);
+ update(Collections.singletonList(uuid), Collections.EMPTY_LIST);
}
/**
@@ -1196,8 +1198,7 @@
}
try {
- update(finished.keySet().iterator(),
- finished.values().iterator());
+ update(finished.keySet(), finished.values());
} catch (IOException e) {
// update failed
log.warn("Failed to update index with deferred text extraction", e);
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (revision 706270)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java (working copy)
@@ -56,7 +56,10 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
-import org.apache.commons.collections.iterators.AbstractIteratorDecorator;
+import org.apache.commons.collections.iterators.TransformIterator;
+import org.apache.commons.collections.collection.TransformedCollection;
+import org.apache.commons.collections.IteratorUtils;
+import org.apache.commons.collections.Transformer;
import org.xml.sax.SAXException;
import org.w3c.dom.Element;
@@ -77,6 +80,7 @@
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
+import java.util.Collection;
/**
* Implements a {@link org.apache.jackrabbit.core.query.QueryHandler} using
@@ -520,7 +524,7 @@
/**
* This implementation forwards the call to
- * {@link MultiIndex#update(java.util.Iterator, java.util.Iterator)} and
+ * {@link MultiIndex#update(Collection, Collection)} and
* transforms the two iterators to the required types.
*
* @param remove uuids of nodes to remove.
@@ -534,62 +538,66 @@
throws RepositoryException, IOException {
checkOpen();
final Map aggregateRoots = new HashMap();
- final Set removedNodeIds = new HashSet();
- final Set addedNodeIds = new HashSet();
- index.update(new AbstractIteratorDecorator(remove) {
- public Object next() {
- NodeId nodeId = (NodeId) super.next();
- removedNodeIds.add(nodeId);
- return nodeId.getUUID();
- }
- }, new AbstractIteratorDecorator(add) {
- public Object next() {
- NodeState state = (NodeState) super.next();
- if (state == null) {
- return null;
- }
- addedNodeIds.add(state.getNodeId());
- removedNodeIds.remove(state.getNodeId());
- Document doc = null;
- try {
- doc = createDocument(state, getNamespaceMappings(),
- index.getIndexFormatVersion());
- retrieveAggregateRoot(state, aggregateRoots);
- } catch (RepositoryException e) {
- log.warn("Exception while creating document for node: "
- + state.getNodeId() + ": " + e.toString());
- }
- return doc;
- }
- });
+ final Set removedUUIDs = new HashSet();
+ final Set addedUUIDs = new HashSet();
+ index.update(IteratorUtils.toList(new TransformIterator(remove,
+ new Transformer() {
+ public Object transform(Object input) {
+ UUID uuid = ((NodeId) input).getUUID();
+ removedUUIDs.add(uuid);
+ return uuid;
+ }
+ })), IteratorUtils.toList(new TransformIterator(add,
+ new Transformer() {
+ public Object transform(Object input) {
+ NodeState state = (NodeState) input;
+ if (state == null) {
+ return null;
+ }
+ UUID uuid = state.getNodeId().getUUID();
+ addedUUIDs.add(uuid);
+ removedUUIDs.remove(uuid);
+ Document doc = null;
+ try {
+ doc = createDocument(state, getNamespaceMappings(),
+ index.getIndexFormatVersion());
+ retrieveAggregateRoot(state, aggregateRoots);
+ } catch (RepositoryException e) {
+ log.warn("Exception while creating document for node: "
+ + state.getNodeId() + ": " + e.toString());
+ }
+ return doc;
+ }
+ })));
+
// remove any aggregateRoot nodes that are new
// and therefore already up-to-date
- aggregateRoots.keySet().removeAll(addedNodeIds);
+ aggregateRoots.keySet().removeAll(addedUUIDs);
- // based on removed NodeIds get affected aggregate root nodes
- retrieveAggregateRoot(removedNodeIds, aggregateRoots);
+ // based on removed UUIDs get affected aggregate root nodes
+ retrieveAggregateRoot(removedUUIDs, aggregateRoots);
// update aggregates if there are any affected
if (aggregateRoots.size() > 0) {
- index.update(new AbstractIteratorDecorator(
- aggregateRoots.keySet().iterator()) {
- public Object next() {
- return ((NodeId) super.next()).getUUID();
- }
- }, new AbstractIteratorDecorator(aggregateRoots.values().iterator()) {
- public Object next() {
- NodeState state = (NodeState) super.next();
- try {
- return createDocument(state, getNamespaceMappings(),
- index.getIndexFormatVersion());
- } catch (RepositoryException e) {
- log.warn("Exception while creating document for node: "
- + state.getNodeId() + ": " + e.toString());
- }
- return null;
- }
- });
+ Collection modified = TransformedCollection.decorate(
+ new ArrayList(),
+ new Transformer() {
+ public Object transform(Object input) {
+ NodeState state = (NodeState) input;
+ try {
+ return createDocument(state,
+ getNamespaceMappings(),
+ index.getIndexFormatVersion());
+ } catch (RepositoryException e) {
+ log.warn("Exception while creating document for node: "
+ + state.getNodeId() + ": " + e.toString());
+ }
+ return null;
+ }
+ });
+ modified.addAll(aggregateRoots.values());
+ index.update(aggregateRoots.keySet(), modified);
}
}
@@ -1141,7 +1149,7 @@
*
* @param state the node state for which we want to retrieve the aggregate
* root.
- * @param map aggregate roots are collected in this map. Key=NodeId,
+ * @param map aggregate roots are collected in this map. Key=UUID,
* value=NodeState.
*/
protected void retrieveAggregateRoot(NodeState state, Map map) {
@@ -1154,7 +1162,7 @@
for (int i = 0; i < aggregateRules.length; i++) {
NodeState root = aggregateRules[i].getAggregateRoot(state);
if (root != null) {
- map.put(root.getNodeId(), root);
+ map.put(root.getNodeId().getUUID(), root);
break;
}
}
@@ -1166,14 +1174,14 @@
}
/**
- * Retrieves the root of the indexing aggregate for removedNodeIds
+ * Retrieves the root of the indexing aggregate for removedUUIDs
* and puts it into map.
*
- * @param removedNodeIds the ids of removed nodes.
+ * @param removedUUIDs the UUIDs of removed nodes.
* @param map aggregate roots are collected in this map.
- * Key=NodeId, value=NodeState.
+ * Key=UUID, value=NodeState.
*/
- protected void retrieveAggregateRoot(Set removedNodeIds, Map map) {
+ protected void retrieveAggregateRoot(Set removedUUIDs, Map map) {
if (indexingConfig != null) {
AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
if (aggregateRules == null) {
@@ -1189,17 +1197,17 @@
TermDocs tDocs = reader.termDocs();
try {
ItemStateManager ism = getContext().getItemStateManager();
- Iterator it = removedNodeIds.iterator();
+ Iterator it = removedUUIDs.iterator();
while (it.hasNext()) {
- NodeId id = (NodeId) it.next();
+ UUID uuid = (UUID) it.next();
aggregateUUIDs = aggregateUUIDs.createTerm(
- id.getUUID().toString());
+ uuid.toString());
tDocs.seek(aggregateUUIDs);
while (tDocs.next()) {
Document doc = reader.document(tDocs.doc(), FieldSelectors.UUID);
- String uuid = doc.get(FieldNames.UUID);
- NodeId nId = new NodeId(UUID.fromString(uuid));
- map.put(nId, ism.getItemState(nId));
+ NodeId nId = new NodeId(UUID.fromString(
+ doc.get(FieldNames.UUID)));
+ map.put(nId.getUUID(), ism.getItemState(nId));
found++;
}
}