Index: lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (revision 0)
+++ lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (working copy)
@@ -0,0 +1,286 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.lucene.util.Bits;
+
+
+/** An {@link AtomicReader} which reads multiple, parallel indexes. Each index added
+ * must have the same number of documents, but typically each contains
+ * different fields. Each document contains the union of the fields of all
+ * documents with the same document number. When searching, matches for a
+ * query term are from the first index added that has the field.
+ *
+ * <p>This is useful, e.g., with collections that have large fields which
+ * change rarely and small fields that change more frequently. The smaller
+ * fields may be re-indexed in a new index and both indexes may be searched
+ * together.
+ *
+ * <p><strong>Warning:</strong> It is up to you to make sure all indexes
+ * are created and modified the same way. For example, if you add
+ * documents to one index, you need to add the same documents in the
+ * same order to the other indexes. Failure to do so will result in
+ * undefined behavior.
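+ *
+ * <p>A minimal usage sketch (the directory names are placeholders; the two
+ * indexes are assumed to have been built in parallel as described above):
+ * <pre>
+ *   AtomicReader large = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(largeDir));
+ *   AtomicReader small = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(smallDir));
+ *   // fields of both indexes become searchable through one reader;
+ *   // closing the parallel reader also closes the two subreaders
+ *   ParallelAtomicReader parallel = new ParallelAtomicReader(large, small);
+ *   IndexSearcher searcher = new IndexSearcher(parallel);
+ * </pre>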
+ */
+public final class ParallelAtomicReader extends AtomicReader {
+ private final FieldInfos fieldInfos = new FieldInfos();
+ private final ParallelFields fields = new ParallelFields();
+ private final AtomicReader[] parallelReaders, storedFieldsReaders;
+ private final boolean closeSubReaders;
+ private final int maxDoc, numDocs;
+ private final boolean hasDeletions;
+ final SortedMap<String,AtomicReader> fieldToReader = new TreeMap<String,AtomicReader>();
+
+ /** Create a ParallelAtomicReader based on the provided readers; the given
+ * readers are closed when this reader is closed, and all of them are also
+ * used to read stored fields. */
+ public ParallelAtomicReader(AtomicReader... readers) throws IOException {
+ this(true, readers, readers);
+ }
+
+ /** Expert: create a ParallelAtomicReader based on the provided readers and
+ * storedFieldsReaders; when a document is loaded, only storedFieldsReaders
+ * are used to read stored fields. Each reader in storedFieldsReaders must
+ * also appear in readers. If closeSubReaders is true, the subreaders are
+ * closed when this reader is closed; otherwise their reference counts are
+ * incremented on construction and decremented again on close. */
+ public ParallelAtomicReader(boolean closeSubReaders, AtomicReader[] readers, AtomicReader[] storedFieldsReaders) throws IOException {
+ this.closeSubReaders = closeSubReaders;
+ assert readers.length >= storedFieldsReaders.length;
+ this.parallelReaders = readers;
+ this.storedFieldsReaders = storedFieldsReaders;
+ this.numDocs = (readers.length > 0) ? readers[0].numDocs() : 0;
+ this.maxDoc = (readers.length > 0) ? readers[0].maxDoc() : 0;
+ this.hasDeletions = (readers.length > 0) ? readers[0].hasDeletions() : false;
+
+ // check compatibility
+ for(AtomicReader reader : readers) {
+ validate(reader);
+ }
+ for(AtomicReader reader : storedFieldsReaders) {
+ validate(reader);
+ }
+
+ validateSubset(readers, storedFieldsReaders);
+
+ for (final AtomicReader reader : readers) {
+ final FieldInfos readerFieldInfos = reader.getFieldInfos();
+ for(FieldInfo fieldInfo : readerFieldInfos) { // update fieldToReader map
+ // NOTE: first reader having a given field "wins":
+ if (fieldToReader.get(fieldInfo.name) == null) {
+ fieldInfos.add(fieldInfo);
+ fieldToReader.put(fieldInfo.name, reader);
+ this.fields.addField(fieldInfo.name, reader.terms(fieldInfo.name));
+ }
+ }
+ if (!closeSubReaders) {
+ reader.incRef();
+ }
+ }
+ }
+
+ private void validate(AtomicReader reader) {
+ if (reader.maxDoc() != maxDoc) {
+ throw new IllegalArgumentException("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
+ }
+ if (reader.numDocs() != numDocs) {
+ throw new IllegalArgumentException("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
+ }
+ }
+
+ // For now, we require that the storedFieldReaders are
+ // also included in readers; in theory we could relax
+ // this.
+ static void validateSubset(AtomicReader[] readers, AtomicReader[] storedFieldsReaders) {
+ final Set<AtomicReader> readersSet = Collections.newSetFromMap(new IdentityHashMap<AtomicReader,Boolean>());
+ for(AtomicReader reader : readers) {
+ readersSet.add(reader);
+ }
+ for(AtomicReader storedReader : storedFieldsReaders) {
+ if (!readersSet.contains(storedReader)) {
+ throw new IllegalArgumentException("each reader in storedFieldsReaders must also be in the readers array (" + storedReader + " is not)");
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder("ParallelAtomicReader(");
+ for (final Iterator<AtomicReader> iter = Arrays.asList(parallelReaders).iterator(); iter.hasNext();) {
+ buffer.append(iter.next());
+ if (iter.hasNext()) buffer.append(", ");
+ }
+ return buffer.append(')').toString();
+ }
+
+ private final class ParallelFieldsEnum extends FieldsEnum {
+ private String currentField;
+ private final Iterator<String> keys;
+ private final Fields fields;
+
+ ParallelFieldsEnum(Fields fields) {
+ this.fields = fields;
+ keys = fieldToReader.keySet().iterator();
+ }
+
+ @Override
+ public String next() throws IOException {
+ if (keys.hasNext()) {
+ currentField = keys.next();
+ } else {
+ currentField = null;
+ }
+ return currentField;
+ }
+
+ @Override
+ public Terms terms() throws IOException {
+ return fields.terms(currentField);
+ }
+
+ }
+
+ // Single instance of this, per ParallelAtomicReader instance
+ private final class ParallelFields extends Fields {
+ final HashMap<String,Terms> fields = new HashMap<String,Terms>();
+
+ ParallelFields() {
+ }
+
+ void addField(String fieldName, Terms terms) throws IOException {
+ fields.put(fieldName, terms);
+ }
+
+ @Override
+ public FieldsEnum iterator() throws IOException {
+ return new ParallelFieldsEnum(this);
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ return fields.get(field);
+ }
+
+ @Override
+ public int getUniqueFieldCount() throws IOException {
+ return fields.size();
+ }
+ }
+
+ @Override
+ public FieldInfos getFieldInfos() {
+ return fieldInfos;
+ }
+
+ @Override
+ public Bits getLiveDocs() {
+ ensureOpen();
+ return hasDeletions ? parallelReaders[0].getLiveDocs() : null;
+ }
+
+ @Override
+ public Fields fields() {
+ ensureOpen();
+ return fields;
+ }
+
+ @Override
+ public int numDocs() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return numDocs;
+ }
+
+ @Override
+ public int maxDoc() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return maxDoc;
+ }
+
+ @Override
+ public boolean hasDeletions() {
+ ensureOpen();
+ return hasDeletions;
+ }
+
+ @Override
+ public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+ ensureOpen();
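+ // visit the stored fields of this document in every storedFieldsReader,
+ // in reader order, so the caller's visitor sees the union of all fields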
+ for (final AtomicReader reader: storedFieldsReaders) {
+ reader.document(docID, visitor);
+ }
+ }
+
+ // get all vectors
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ ensureOpen();
+ ParallelFields fields = new ParallelFields();
+ for (Map.Entry<String,AtomicReader> ent : fieldToReader.entrySet()) {
+ String fieldName = ent.getKey();
+ Terms vector = ent.getValue().getTermVector(docID, fieldName);
+ if (vector != null) {
+ fields.addField(fieldName, vector);
+ }
+ }
+
+ return fields;
+ }
+
+ @Override
+ public boolean hasNorms(String field) throws IOException {
+ ensureOpen();
+ AtomicReader reader = fieldToReader.get(field);
+ return reader==null ? false : reader.hasNorms(field);
+ }
+
+ @Override
+ protected synchronized void doClose() throws IOException {
+ IOException ioe = null;
+ for (AtomicReader reader : parallelReaders) {
+ try {
+ if (closeSubReaders) {
+ reader.close();
+ } else {
+ reader.decRef();
+ }
+ } catch (IOException e) {
+ if (ioe == null) ioe = e;
+ }
+ }
+ // throw the first exception
+ if (ioe != null) throw ioe;
+ }
+
+ @Override
+ public DocValues docValues(String field) throws IOException {
+ ensureOpen();
+ AtomicReader reader = fieldToReader.get(field);
+ return reader == null ? null : reader.docValues(field);
+ }
+
+ @Override
+ public DocValues normValues(String field) throws IOException {
+ ensureOpen();
+ AtomicReader reader = fieldToReader.get(field);
+ return reader == null ? null : reader.normValues(field);
+ }
+}
Index: lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (revision 0)
+++ lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (working copy)
Property changes on: lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Date Author Id Revision HeadURL
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java (revision 0)
+++ lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java (working copy)
@@ -0,0 +1,181 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.Set;
+
+/** A {@link CompositeReader} which reads multiple, parallel indexes. Each index added
+ * must have the same number of documents, and exactly the same hierarchical subreader structure,
+ * but typically each contains different fields. Each document contains the
+ * union of the fields of all
+ * documents with the same document number. When searching, matches for a
+ * query term are from the first index added that has the field.
+ *
+ * <p>This is useful, e.g., with collections that have large fields which
+ * change rarely and small fields that change more frequently. The smaller
+ * fields may be re-indexed in a new index and both indexes may be searched
+ * together.
+ *
+ * <p><strong>Warning:</strong> It is up to you to make sure all indexes
+ * are created and modified the same way. For example, if you add
+ * documents to one index, you need to add the same documents in the
+ * same order to the other indexes. Failure to do so will result in
+ * undefined behavior.
+ * <p>A good strategy to create suitable indexes with {@link IndexWriter} is to use
+ * {@link LogDocMergePolicy}, as this one does not reorder documents
+ * during merging (like {@code TieredMergePolicy}) and triggers merges
+ * by number of documents per segment. If you use different {@link MergePolicy}s
+ * it might happen that the segment structure of your index is no longer predictable.
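+ *
+ * <p>A minimal usage sketch (directory names are placeholders; both indexes are
+ * assumed to have been written with the same merge policy and document order,
+ * e.g. with {@link LogDocMergePolicy} as recommended above):
+ * <pre>
+ *   DirectoryReader rd1 = DirectoryReader.open(dir1);
+ *   DirectoryReader rd2 = DirectoryReader.open(dir2);
+ *   // both readers must expose an identical subreader (segment) structure
+ *   ParallelCompositeReader parallel = new ParallelCompositeReader(rd1, rd2);
+ *   IndexSearcher searcher = new IndexSearcher(parallel);
+ * </pre>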
+ */
+public final class ParallelCompositeReader extends BaseMultiReader<IndexReader> {
+ private final boolean closeSubReaders;
+ private final CompositeReader parallelReaders[];
+
+ /** Create a ParallelCompositeReader based on the provided readers; the given
+ * readers are closed when this reader is closed, and all of them are also
+ * used to read stored fields. */
+ public ParallelCompositeReader(CompositeReader... readers) throws IOException {
+ this(true, readers, readers);
+ }
+
+ /** Expert: create a ParallelCompositeReader based on the provided readers and
+ * storedFieldsReaders; when a document is loaded, only storedFieldsReaders
+ * are used to read stored fields. Each reader in storedFieldsReaders must
+ * also appear in readers. */
+ public ParallelCompositeReader(boolean closeSubReaders, CompositeReader[] readers, CompositeReader[] storedFieldsReaders) throws IOException {
+ super(prepareSubReaders(readers, storedFieldsReaders));
+ this.closeSubReaders = closeSubReaders;
+ this.parallelReaders = readers;
+ if (!closeSubReaders) {
+ for (CompositeReader reader : this.parallelReaders) {
+ reader.incRef();
+ }
+ }
+ }
+
+ // For now, we require that the storedFieldReaders are
+ // also included in readers; in theory we could relax
+ // this.
+ static void validateSubset(CompositeReader[] readers, CompositeReader[] storedFieldsReaders) {
+ final Set<CompositeReader> readersSet = Collections.newSetFromMap(new IdentityHashMap<CompositeReader,Boolean>());
+ for(CompositeReader reader : readers) {
+ readersSet.add(reader);
+ }
+ for(CompositeReader storedReader : storedFieldsReaders) {
+ if (!readersSet.contains(storedReader)) {
+ throw new IllegalArgumentException("each reader in storedFieldsReaders must also be in the readers array (" + storedReader + " is not)");
+ }
+ }
+ }
+
+ private static IndexReader[] prepareSubReaders(CompositeReader[] readers, CompositeReader[] storedFieldsReaders) throws IOException {
+ validateSubset(readers, storedFieldsReaders);
+ if (readers.length == 0) {
+ return new IndexReader[0];
+ } else {
+
+ final IndexReader[] firstSubReaders = readers[0].getSequentialSubReaders();
+
+ final int maxDoc = readers[0].maxDoc();
+ final int numDocs = readers[0].numDocs();
+ final int[] childSizes = new int[firstSubReaders.length];
+ for (int i = 0; i < firstSubReaders.length; i++) {
+ childSizes[i] = firstSubReaders[i].maxDoc();
+ }
+
+ // check compatibility of every parallel reader against the first one
+ // (note: readers must not be indexed by subreader slot, as their count
+ // is independent of the number of subreaders):
+ for (final CompositeReader reader : readers) {
+ if (reader.maxDoc() != maxDoc) {
+ throw new IllegalArgumentException("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
+ }
+ if (reader.numDocs() != numDocs) {
+ throw new IllegalArgumentException("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
+ }
+ final IndexReader[] subs = reader.getSequentialSubReaders();
+ if (subs.length != childSizes.length) {
+ throw new IllegalArgumentException("All readers must have same number of subReaders");
+ }
+ for (int subIDX = 0; subIDX < subs.length; subIDX++) {
+ if (subs[subIDX].maxDoc() != childSizes[subIDX]) {
+ throw new IllegalArgumentException("All readers must have same subReader maxDoc");
+ }
+ }
+ }
+
+ // hierarchically build the same subreader structure as the first CompositeReader with Parallel*Readers:
+ final IndexReader[] subReaders = new IndexReader[firstSubReaders.length];
+ for (int i = 0; i < subReaders.length; i++) {
+
+ if (firstSubReaders[i] instanceof AtomicReader) {
+ final AtomicReader[] atomicSubs = new AtomicReader[readers.length];
+ for (int j = 0; j < readers.length; j++) {
+ atomicSubs[j] = (AtomicReader) readers[j].getSequentialSubReaders()[i];
+ }
+ final AtomicReader[] storedSubs = new AtomicReader[storedFieldsReaders.length];
+ for (int j = 0; j < storedFieldsReaders.length; j++) {
+ storedSubs[j] = (AtomicReader) storedFieldsReaders[j].getSequentialSubReaders()[i];
+ }
+ // we simply enable closing of subReaders, to prevent incRefs on subReaders
+ // -> for synthetic subReaders, close() is never
+ // called by our doClose()
+ subReaders[i] = new ParallelAtomicReader(true, atomicSubs, storedSubs);
+ } else {
+ assert firstSubReaders[i] instanceof CompositeReader;
+ final CompositeReader[] compositeSubs = new CompositeReader[readers.length];
+ for (int j = 0; j < readers.length; j++) {
+ compositeSubs[j] = (CompositeReader) readers[j].getSequentialSubReaders()[i];
+ }
+ final CompositeReader[] storedSubs = new CompositeReader[storedFieldsReaders.length];
+ for (int j = 0; j < storedFieldsReaders.length; j++) {
+ storedSubs[j] = (CompositeReader) storedFieldsReaders[j].getSequentialSubReaders()[i];
+ }
+ // we simply enable closing of subReaders, to prevent incRefs on subReaders
+ // -> for synthetic subReaders, close() is never called by our doClose()
+ subReaders[i] = new ParallelCompositeReader(true, compositeSubs, storedSubs);
+ }
+ }
+
+ return subReaders;
+ }
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder("ParallelCompositeReader(");
+ for (final Iterator<CompositeReader> iter = Arrays.asList(parallelReaders).iterator(); iter.hasNext();) {
+ buffer.append(iter.next());
+ if (iter.hasNext()) buffer.append(", ");
+ }
+ return buffer.append(')').toString();
+ }
+
+ @Override
+ protected synchronized void doClose() throws IOException {
+ IOException ioe = null;
+ for (final CompositeReader reader : parallelReaders) {
+ try {
+ if (closeSubReaders) {
+ reader.close();
+ } else {
+ reader.decRef();
+ }
+ } catch (IOException e) {
+ if (ioe == null) ioe = e;
+ }
+ }
+ // throw the first exception
+ if (ioe != null) throw ioe;
+ }
+}
Index: lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java (revision 0)
+++ lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java (working copy)
Property changes on: lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Date Author Id Revision HeadURL
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/core/src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ParallelReader.java (revision 1242234)
+++ lucene/core/src/java/org/apache/lucene/index/ParallelReader.java (working copy)
@@ -1,298 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.*;
-
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-
-
-/** An AtomicIndexReader which reads multiple, parallel indexes. Each index added
- * must have the same number of documents, but typically each contains
- * different fields. Each document contains the union of the fields of all
- * documents with the same document number. When searching, matches for a
- * query term are from the first index added that has the field.
- *
- * <p>This is useful, e.g., with collections that have large fields which
- * change rarely and small fields that change more frequently. The smaller
- * fields may be re-indexed in a new index and both indexes may be searched
- * together.
- *
- * <p><strong>Warning:</strong> It is up to you to make sure all indexes
- * are created and modified the same way. For example, if you add
- * documents to one index, you need to add the same documents in the
- * same order to the other indexes. Failure to do so will result in
- * undefined behavior.
- */
-public class ParallelReader extends AtomicReader {
- private List<AtomicReader> readers = new ArrayList<AtomicReader>();
- private List<Boolean> decrefOnClose = new ArrayList<Boolean>(); // remember which subreaders to decRef on close
- boolean incRefReaders = false;
- private SortedMap<String,AtomicReader> fieldToReader = new TreeMap<String,AtomicReader>();
- private Map<AtomicReader,Collection<String>> readerToFields = new HashMap<AtomicReader,Collection<String>>();
- private List<AtomicReader> storedFieldReaders = new ArrayList<AtomicReader>();
- private Map<String,DocValues> normsCache = new HashMap<String,DocValues>();
- private int maxDoc;
- private int numDocs;
- private boolean hasDeletions;
- private final FieldInfos fieldInfos;
-
- private final ParallelFields fields = new ParallelFields();
-
- /** Construct a ParallelReader.
- * Note that all subreaders are closed if this ParallelReader is closed.
- */
- public ParallelReader() throws IOException { this(true); }
-
- /** Construct a ParallelReader.
- * @param closeSubReaders indicates whether the subreaders should be closed
- * when this ParallelReader is closed
- */
- public ParallelReader(boolean closeSubReaders) throws IOException {
- super();
- this.incRefReaders = !closeSubReaders;
- fieldInfos = new FieldInfos();
- }
-
- /** {@inheritDoc} */
- @Override
- public String toString() {
- final StringBuilder buffer = new StringBuilder("ParallelReader(");
- final Iterator<AtomicReader> iter = readers.iterator();
- if (iter.hasNext()) {
- buffer.append(iter.next());
- }
- while (iter.hasNext()) {
- buffer.append(", ").append(iter.next());
- }
- buffer.append(')');
- return buffer.toString();
- }
-
- /** Add an AtomicIndexReader.
- * @throws IOException if there is a low-level IO error
- */
- public void add(AtomicReader reader) throws IOException {
- ensureOpen();
- add(reader, false);
- }
-
- /** Add an AtomicIndexReader whose stored fields will not be returned. This can
- * accelerate search when stored fields are only needed from a subset of
- * the IndexReaders.
- *
- * @throws IllegalArgumentException if not all indexes contain the same number
- * of documents
- * @throws IllegalArgumentException if not all indexes have the same value
- * of {@link AtomicReader#maxDoc()}
- * @throws IOException if there is a low-level IO error
- */
- public void add(AtomicReader reader, boolean ignoreStoredFields)
- throws IOException {
-
- ensureOpen();
- if (readers.size() == 0) {
- this.maxDoc = reader.maxDoc();
- this.numDocs = reader.numDocs();
- this.hasDeletions = reader.hasDeletions();
- }
-
- if (reader.maxDoc() != maxDoc) // check compatibility
- throw new IllegalArgumentException
- ("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
- if (reader.numDocs() != numDocs)
- throw new IllegalArgumentException
- ("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
-
- final FieldInfos readerFieldInfos = MultiFields.getMergedFieldInfos(reader);
- for(FieldInfo fieldInfo : readerFieldInfos) { // update fieldToReader map
- // NOTE: first reader having a given field "wins":
- if (fieldToReader.get(fieldInfo.name) == null) {
- fieldInfos.add(fieldInfo);
- fieldToReader.put(fieldInfo.name, reader);
- this.fields.addField(fieldInfo.name, reader.terms(fieldInfo.name));
- }
- }
-
- if (!ignoreStoredFields)
- storedFieldReaders.add(reader); // add to storedFieldReaders
- readers.add(reader);
-
- if (incRefReaders) {
- reader.incRef();
- }
- decrefOnClose.add(Boolean.valueOf(incRefReaders));
- synchronized(normsCache) {
- normsCache.clear(); // TODO: don't need to clear this for all fields really?
- }
- }
-
- private class ParallelFieldsEnum extends FieldsEnum {
- String currentField;
- Iterator<String> keys;
- private final Fields fields;
-
- ParallelFieldsEnum(Fields fields) {
- this.fields = fields;
- keys = fieldToReader.keySet().iterator();
- }
-
- @Override
- public String next() throws IOException {
- if (keys.hasNext()) {
- currentField = keys.next();
- } else {
- currentField = null;
- }
- return currentField;
- }
-
- @Override
- public Terms terms() throws IOException {
- return fields.terms(currentField);
- }
-
- }
-
- // Single instance of this, per ParallelReader instance
- private class ParallelFields extends Fields {
- final HashMap<String,Terms> fields = new HashMap<String,Terms>();
-
- public void addField(String fieldName, Terms terms) throws IOException {
- fields.put(fieldName, terms);
- }
-
- @Override
- public FieldsEnum iterator() throws IOException {
- return new ParallelFieldsEnum(this);
- }
-
- @Override
- public Terms terms(String field) throws IOException {
- return fields.get(field);
- }
-
- @Override
- public int getUniqueFieldCount() throws IOException {
- return fields.size();
- }
- }
-
- @Override
- public FieldInfos getFieldInfos() {
- return fieldInfos;
- }
-
- @Override
- public Bits getLiveDocs() {
- ensureOpen();
- return readers.get(0).getLiveDocs();
- }
-
- @Override
- public Fields fields() {
- ensureOpen();
- return fields;
- }
-
- @Override
- public int numDocs() {
- // Don't call ensureOpen() here (it could affect performance)
- return numDocs;
- }
-
- @Override
- public int maxDoc() {
- // Don't call ensureOpen() here (it could affect performance)
- return maxDoc;
- }
-
- @Override
- public boolean hasDeletions() {
- ensureOpen();
- return hasDeletions;
- }
-
- @Override
- public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
- ensureOpen();
- for (final AtomicReader reader: storedFieldReaders) {
- reader.document(docID, visitor);
- }
- }
-
- // get all vectors
- @Override
- public Fields getTermVectors(int docID) throws IOException {
- ensureOpen();
- ParallelFields fields = new ParallelFields();
- for (Map.Entry<String,AtomicReader> ent : fieldToReader.entrySet()) {
- String fieldName = ent.getKey();
- Terms vector = ent.getValue().getTermVector(docID, fieldName);
- if (vector != null) {
- fields.addField(fieldName, vector);
- }
- }
-
- return fields;
- }
-
- @Override
- public boolean hasNorms(String field) throws IOException {
- ensureOpen();
- AtomicReader reader = fieldToReader.get(field);
- return reader==null ? false : reader.hasNorms(field);
- }
-
- // for testing
- AtomicReader[] getSubReaders() {
- return readers.toArray(new AtomicReader[readers.size()]);
- }
-
- @Override
- protected synchronized void doClose() throws IOException {
- for (int i = 0; i < readers.size(); i++) {
- if (decrefOnClose.get(i).booleanValue()) {
- readers.get(i).decRef();
- } else {
- readers.get(i).close();
- }
- }
- }
-
- // TODO: I suspect this is completely untested!!!!!
- @Override
- public DocValues docValues(String field) throws IOException {
- AtomicReader reader = fieldToReader.get(field);
- return reader == null ? null : reader.docValues(field);
- }
-
- // TODO: I suspect this is completely untested!!!!!
- @Override
- public synchronized DocValues normValues(String field) throws IOException {
- DocValues values = normsCache.get(field);
- if (values == null) {
- AtomicReader reader = fieldToReader.get(field);
- values = reader == null ? null : reader.normValues(field);
- normsCache.put(field, values);
- }
- return values;
- }
-}
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java (revision 0)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java (working copy)
@@ -0,0 +1,271 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestParallelAtomicReader extends LuceneTestCase {
+
+ private IndexSearcher parallel, single;
+ private Directory dir, dir1, dir2;
+
+ public void testQueries() throws Exception {
+ single = single(random);
+ parallel = parallel(random);
+
+ queryTest(new TermQuery(new Term("f1", "v1")));
+ queryTest(new TermQuery(new Term("f1", "v2")));
+ queryTest(new TermQuery(new Term("f2", "v1")));
+ queryTest(new TermQuery(new Term("f2", "v2")));
+ queryTest(new TermQuery(new Term("f3", "v1")));
+ queryTest(new TermQuery(new Term("f3", "v2")));
+ queryTest(new TermQuery(new Term("f4", "v1")));
+ queryTest(new TermQuery(new Term("f4", "v2")));
+
+ BooleanQuery bq1 = new BooleanQuery();
+ bq1.add(new TermQuery(new Term("f1", "v1")), Occur.MUST);
+ bq1.add(new TermQuery(new Term("f4", "v1")), Occur.MUST);
+ queryTest(bq1);
+
+ single.getIndexReader().close(); single = null;
+ parallel.getIndexReader().close(); parallel = null;
+ dir.close(); dir = null;
+ dir1.close(); dir1 = null;
+ dir2.close(); dir2 = null;
+ }
+
+ public void testFieldNames() throws Exception {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)),
+ SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)));
+ FieldInfos fieldInfos = pr.getFieldInfos();
+ assertEquals(4, fieldInfos.size());
+ assertNotNull(fieldInfos.fieldInfo("f1"));
+ assertNotNull(fieldInfos.fieldInfo("f2"));
+ assertNotNull(fieldInfos.fieldInfo("f3"));
+ assertNotNull(fieldInfos.fieldInfo("f4"));
+ pr.close();
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testRefCounts1() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ AtomicReader ir1, ir2;
+ // default closeSubReaders=true: refCounts stay untouched, but the subreaders are closed when the ParallelAtomicReader itself is closed
+ ParallelAtomicReader pr = new ParallelAtomicReader(ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)),
+ ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)));
+
+ // check RefCounts
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ pr.close();
+ assertEquals(0, ir1.getRefCount());
+ assertEquals(0, ir2.getRefCount());
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testRefCounts2() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
+ AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2));
+ // don't close subreaders, so ParallelReader will increment refcounts
+ ParallelAtomicReader pr = new ParallelAtomicReader(false,
+ new AtomicReader[] {ir1, ir2},
+ new AtomicReader[] {ir1, ir2});
+ // check RefCounts
+ assertEquals(2, ir1.getRefCount());
+ assertEquals(2, ir2.getRefCount());
+ pr.close();
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ assertEquals(0, ir1.getRefCount());
+ assertEquals(0, ir2.getRefCount());
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testIncompatibleIndexes() throws IOException {
+ // two documents:
+ Directory dir1 = getDir1(random);
+
+ // one document only:
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d3 = new Document();
+
+ d3.add(newField("f3", "v1", TextField.TYPE_STORED));
+ w2.addDocument(d3);
+ w2.close();
+
+ AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
+ AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2));
+
+ try {
+ new ParallelAtomicReader(ir1, ir2);
+ fail("didn't get exptected exception: indexes don't have same number of documents");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+
+ try {
+ new ParallelAtomicReader(random.nextBoolean(),
+ new AtomicReader[] {ir1, ir2},
+ new AtomicReader[] {ir1, ir2});
+ fail("didn't get expected exception: indexes don't have same number of documents");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+ // check RefCounts
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testInvalidStoredFieldsReaders() throws IOException {
+ Directory dir1 = getDir1(random);
+ AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
+ AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
+
+ try {
+ new ParallelAtomicReader(random.nextBoolean(), new AtomicReader[] {ir1}, new AtomicReader[] {ir2});
+ fail("didn't get expected exception");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+ // check RefCounts
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ dir1.close();
+ }
+
+ public void testIgnoreStoredFields() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1));
+ AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2));
+ ParallelAtomicReader pr = new ParallelAtomicReader(true,
+ new AtomicReader[] {ir1, ir2},
+ new AtomicReader[] {ir1});
+ assertEquals("v1", pr.document(0).get("f1"));
+ assertEquals("v1", pr.document(0).get("f2"));
+ assertNull(pr.document(0).get("f3"));
+ assertNull(pr.document(0).get("f4"));
+ pr.close();
+ dir1.close();
+ dir2.close();
+ }
+
+ private void queryTest(Query query) throws IOException {
+ ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
+ ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
+ assertEquals(parallelHits.length, singleHits.length);
+ for(int i = 0; i < parallelHits.length; i++) {
+ assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
+ Document docParallel = parallel.doc(parallelHits[i].doc);
+ Document docSingle = single.doc(singleHits[i].doc);
+ assertEquals(docParallel.get("f1"), docSingle.get("f1"));
+ assertEquals(docParallel.get("f2"), docSingle.get("f2"));
+ assertEquals(docParallel.get("f3"), docSingle.get("f3"));
+ assertEquals(docParallel.get("f4"), docSingle.get("f4"));
+ }
+ }
+
+ // Fields 1-4 indexed together:
+ private IndexSearcher single(Random random) throws IOException {
+ dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d1 = new Document();
+ d1.add(newField("f1", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f2", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f3", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f4", "v1", TextField.TYPE_STORED));
+ w.addDocument(d1);
+ Document d2 = new Document();
+ d2.add(newField("f1", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f2", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f3", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f4", "v2", TextField.TYPE_STORED));
+ w.addDocument(d2);
+ w.close();
+
+ DirectoryReader ir = DirectoryReader.open(dir);
+ return newSearcher(ir);
+ }
+
+ // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
+ private IndexSearcher parallel(Random random) throws IOException {
+ dir1 = getDir1(random);
+ dir2 = getDir2(random);
+ ParallelAtomicReader pr = new ParallelAtomicReader(
+ SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)),
+ SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)));
+ return newSearcher(pr);
+ }
+
+ private Directory getDir1(Random random) throws IOException {
+ Directory dir1 = newDirectory();
+ IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d1 = new Document();
+ d1.add(newField("f1", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f2", "v1", TextField.TYPE_STORED));
+ w1.addDocument(d1);
+ Document d2 = new Document();
+ d2.add(newField("f1", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f2", "v2", TextField.TYPE_STORED));
+ w1.addDocument(d2);
+ w1.close();
+ return dir1;
+ }
+
+ private Directory getDir2(Random random) throws IOException {
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d3 = new Document();
+ d3.add(newField("f3", "v1", TextField.TYPE_STORED));
+ d3.add(newField("f4", "v1", TextField.TYPE_STORED));
+ w2.addDocument(d3);
+ Document d4 = new Document();
+ d4.add(newField("f3", "v2", TextField.TYPE_STORED));
+ d4.add(newField("f4", "v2", TextField.TYPE_STORED));
+ w2.addDocument(d4);
+ w2.close();
+ return dir2;
+ }
+
+}
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java (revision 0)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java (working copy)
Property changes on: lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Date Author Id Revision HeadURL
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (revision 0)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (working copy)
@@ -0,0 +1,259 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestParallelCompositeReader extends LuceneTestCase {
+
+ private IndexSearcher parallel, single;
+ private Directory dir, dir1, dir2;
+
+ public void testQueries() throws Exception {
+ single = single(random);
+ parallel = parallel(random);
+
+ queryTest(new TermQuery(new Term("f1", "v1")));
+ queryTest(new TermQuery(new Term("f1", "v2")));
+ queryTest(new TermQuery(new Term("f2", "v1")));
+ queryTest(new TermQuery(new Term("f2", "v2")));
+ queryTest(new TermQuery(new Term("f3", "v1")));
+ queryTest(new TermQuery(new Term("f3", "v2")));
+ queryTest(new TermQuery(new Term("f4", "v1")));
+ queryTest(new TermQuery(new Term("f4", "v2")));
+
+ BooleanQuery bq1 = new BooleanQuery();
+ bq1.add(new TermQuery(new Term("f1", "v1")), Occur.MUST);
+ bq1.add(new TermQuery(new Term("f4", "v1")), Occur.MUST);
+ queryTest(bq1);
+
+ single.getIndexReader().close(); single = null;
+ parallel.getIndexReader().close(); parallel = null;
+ dir.close(); dir = null;
+ dir1.close(); dir1 = null;
+ dir2.close(); dir2 = null;
+ }
+
+ public void testRefCounts1() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ DirectoryReader ir1, ir2;
+ // default closeSubReaders=true: refCounts stay untouched, but the subreaders are closed when the ParallelCompositeReader itself is closed
+ ParallelCompositeReader pr = new ParallelCompositeReader(ir1 = DirectoryReader.open(dir1),
+ ir2 = DirectoryReader.open(dir2));
+ // check RefCounts
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ pr.close();
+ assertEquals(0, ir1.getRefCount());
+ assertEquals(0, ir2.getRefCount());
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testRefCounts2() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ DirectoryReader ir1 = DirectoryReader.open(dir1);
+ DirectoryReader ir2 = DirectoryReader.open(dir2);
+ CompositeReader[] readers = new CompositeReader[] {ir1, ir2};
+
+ // don't close subreaders, so ParallelReader will increment refcounts
+ ParallelCompositeReader pr = new ParallelCompositeReader(false, readers, readers);
+ // check RefCounts
+ assertEquals(2, ir1.getRefCount());
+ assertEquals(2, ir2.getRefCount());
+ pr.close();
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ assertEquals(0, ir1.getRefCount());
+ assertEquals(0, ir2.getRefCount());
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testIncompatibleIndexes() throws IOException {
+ // two documents:
+ Directory dir1 = getDir1(random);
+
+ // one document only:
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d3 = new Document();
+
+ d3.add(newField("f3", "v1", TextField.TYPE_STORED));
+ w2.addDocument(d3);
+ w2.close();
+
+ DirectoryReader ir1 = DirectoryReader.open(dir1),
+ ir2 = DirectoryReader.open(dir2);
+ CompositeReader[] readers = new CompositeReader[] {ir1, ir2};
+ try {
+ new ParallelCompositeReader(readers);
+ fail("didn't get expected exception: indexes don't have same number of documents");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+ try {
+ new ParallelCompositeReader(random.nextBoolean(), readers, readers);
+ fail("didn't get expected exception: indexes don't have same number of documents");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ assertEquals(0, ir1.getRefCount());
+ assertEquals(0, ir2.getRefCount());
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testInvalidStoredFieldsReaders() throws IOException {
+ Directory dir1 = getDir1(random);
+ CompositeReader ir1 = DirectoryReader.open(dir1);
+ CompositeReader ir2 = DirectoryReader.open(dir1);
+
+ try {
+ new ParallelCompositeReader(random.nextBoolean(), new CompositeReader[] {ir1}, new CompositeReader[] {ir2});
+ fail("didn't get expected exception");
+ } catch (IllegalArgumentException e) {
+ // expected exception
+ }
+ // check RefCounts
+ assertEquals(1, ir1.getRefCount());
+ assertEquals(1, ir2.getRefCount());
+ ir1.close();
+ ir2.close();
+ dir1.close();
+ }
+
+ public void testIgnoreStoredFields() throws IOException {
+ Directory dir1 = getDir1(random);
+ Directory dir2 = getDir2(random);
+ CompositeReader ir1 = DirectoryReader.open(dir1);
+ CompositeReader ir2 = DirectoryReader.open(dir2);
+
+ ParallelCompositeReader pr = new ParallelCompositeReader(true,
+ new CompositeReader[] {ir1, ir2},
+ new CompositeReader[] {ir1});
+ assertEquals("v1", pr.document(0).get("f1"));
+ assertEquals("v1", pr.document(0).get("f2"));
+ assertNull(pr.document(0).get("f3"));
+ assertNull(pr.document(0).get("f4"));
+ pr.close();
+ dir1.close();
+ dir2.close();
+ }
+
+ private void queryTest(Query query) throws IOException {
+ ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
+ ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
+ assertEquals(parallelHits.length, singleHits.length);
+ for(int i = 0; i < parallelHits.length; i++) {
+ assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
+ Document docParallel = parallel.doc(parallelHits[i].doc);
+ Document docSingle = single.doc(singleHits[i].doc);
+ assertEquals(docParallel.get("f1"), docSingle.get("f1"));
+ assertEquals(docParallel.get("f2"), docSingle.get("f2"));
+ assertEquals(docParallel.get("f3"), docSingle.get("f3"));
+ assertEquals(docParallel.get("f4"), docSingle.get("f4"));
+ }
+ }
+
+ // Fields 1-4 indexed together:
+ private IndexSearcher single(Random random) throws IOException {
+ dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ Document d1 = new Document();
+ d1.add(newField("f1", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f2", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f3", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f4", "v1", TextField.TYPE_STORED));
+ w.addDocument(d1);
+ Document d2 = new Document();
+ d2.add(newField("f1", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f2", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f3", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f4", "v2", TextField.TYPE_STORED));
+ w.addDocument(d2);
+ w.close();
+
+ DirectoryReader ir = DirectoryReader.open(dir);
+ return newSearcher(ir);
+ }
+
+ // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
+ private IndexSearcher parallel(Random random) throws IOException {
+ dir1 = getDir1(random);
+ dir2 = getDir2(random);
+ final DirectoryReader rd1 = DirectoryReader.open(dir1),
+ rd2 = DirectoryReader.open(dir2);
+ assertEquals(2, rd1.getSequentialSubReaders().length);
+ assertEquals(2, rd2.getSequentialSubReaders().length);
+ ParallelCompositeReader pr = new ParallelCompositeReader(rd1, rd2);
+ return newSearcher(pr);
+ }
+
+ private Directory getDir1(Random random) throws IOException {
+ Directory dir1 = newDirectory();
+ IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
+ Document d1 = new Document();
+ d1.add(newField("f1", "v1", TextField.TYPE_STORED));
+ d1.add(newField("f2", "v1", TextField.TYPE_STORED));
+ w1.addDocument(d1);
+ w1.commit();
+ Document d2 = new Document();
+ d2.add(newField("f1", "v2", TextField.TYPE_STORED));
+ d2.add(newField("f2", "v2", TextField.TYPE_STORED));
+ w1.addDocument(d2);
+ w1.close();
+ return dir1;
+ }
+
+ private Directory getDir2(Random random) throws IOException {
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES));
+ Document d3 = new Document();
+ d3.add(newField("f3", "v1", TextField.TYPE_STORED));
+ d3.add(newField("f4", "v1", TextField.TYPE_STORED));
+ w2.addDocument(d3);
+ w2.commit();
+ Document d4 = new Document();
+ d4.add(newField("f3", "v2", TextField.TYPE_STORED));
+ d4.add(newField("f4", "v2", TextField.TYPE_STORED));
+ w2.addDocument(d4);
+ w2.close();
+ return dir2;
+ }
+
+}
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (revision 0)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (working copy)
Property changes on: lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Date Author Id Revision HeadURL
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelReader.java (revision 1242234)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelReader.java (working copy)
@@ -1,192 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.Random;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.*;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestParallelReader extends LuceneTestCase {
-
- private IndexSearcher parallel;
- private IndexSearcher single;
- private Directory dir, dir1, dir2;
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- single = single(random);
- parallel = parallel(random);
- }
-
- @Override
- public void tearDown() throws Exception {
- single.getIndexReader().close();
- parallel.getIndexReader().close();
- dir.close();
- dir1.close();
- dir2.close();
- super.tearDown();
- }
-
- public void testQueries() throws Exception {
- queryTest(new TermQuery(new Term("f1", "v1")));
- queryTest(new TermQuery(new Term("f1", "v2")));
- queryTest(new TermQuery(new Term("f2", "v1")));
- queryTest(new TermQuery(new Term("f2", "v2")));
- queryTest(new TermQuery(new Term("f3", "v1")));
- queryTest(new TermQuery(new Term("f3", "v2")));
- queryTest(new TermQuery(new Term("f4", "v1")));
- queryTest(new TermQuery(new Term("f4", "v2")));
-
- BooleanQuery bq1 = new BooleanQuery();
- bq1.add(new TermQuery(new Term("f1", "v1")), Occur.MUST);
- bq1.add(new TermQuery(new Term("f4", "v1")), Occur.MUST);
- queryTest(bq1);
- }
-
- public void testFieldNames() throws Exception {
- Directory dir1 = getDir1(random);
- Directory dir2 = getDir2(random);
- ParallelReader pr = new ParallelReader();
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)));
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)));
- FieldInfos fieldInfos = pr.getFieldInfos();
- assertEquals(4, fieldInfos.size());
- assertNotNull(fieldInfos.fieldInfo("f1"));
- assertNotNull(fieldInfos.fieldInfo("f2"));
- assertNotNull(fieldInfos.fieldInfo("f3"));
- assertNotNull(fieldInfos.fieldInfo("f4"));
- pr.close();
- dir1.close();
- dir2.close();
- }
-
- public void testIncompatibleIndexes() throws IOException {
- // two documents:
- Directory dir1 = getDir1(random);
-
- // one document only:
- Directory dir2 = newDirectory();
- IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- Document d3 = new Document();
-
- d3.add(newField("f3", "v1", TextField.TYPE_STORED));
- w2.addDocument(d3);
- w2.close();
-
- ParallelReader pr = new ParallelReader();
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)));
- DirectoryReader ir = DirectoryReader.open(dir2);
- try {
- pr.add(SlowCompositeReaderWrapper.wrap(ir));
- fail("didn't get exptected exception: indexes don't have same number of documents");
- } catch (IllegalArgumentException e) {
- // expected exception
- }
- pr.close();
- ir.close();
- dir1.close();
- dir2.close();
- }
-
- private void queryTest(Query query) throws IOException {
- ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
- ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
- assertEquals(parallelHits.length, singleHits.length);
- for(int i = 0; i < parallelHits.length; i++) {
- assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
- Document docParallel = parallel.doc(parallelHits[i].doc);
- Document docSingle = single.doc(singleHits[i].doc);
- assertEquals(docParallel.get("f1"), docSingle.get("f1"));
- assertEquals(docParallel.get("f2"), docSingle.get("f2"));
- assertEquals(docParallel.get("f3"), docSingle.get("f3"));
- assertEquals(docParallel.get("f4"), docSingle.get("f4"));
- }
- }
-
- // Fields 1-4 indexed together:
- private IndexSearcher single(Random random) throws IOException {
- dir = newDirectory();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- Document d1 = new Document();
- d1.add(newField("f1", "v1", TextField.TYPE_STORED));
- d1.add(newField("f2", "v1", TextField.TYPE_STORED));
- d1.add(newField("f3", "v1", TextField.TYPE_STORED));
- d1.add(newField("f4", "v1", TextField.TYPE_STORED));
- w.addDocument(d1);
- Document d2 = new Document();
- d2.add(newField("f1", "v2", TextField.TYPE_STORED));
- d2.add(newField("f2", "v2", TextField.TYPE_STORED));
- d2.add(newField("f3", "v2", TextField.TYPE_STORED));
- d2.add(newField("f4", "v2", TextField.TYPE_STORED));
- w.addDocument(d2);
- w.close();
-
- DirectoryReader ir = DirectoryReader.open(dir);
- return newSearcher(ir);
- }
-
- // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
- private IndexSearcher parallel(Random random) throws IOException {
- dir1 = getDir1(random);
- dir2 = getDir2(random);
- ParallelReader pr = new ParallelReader();
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)));
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)));
- return newSearcher(pr);
- }
-
- private Directory getDir1(Random random) throws IOException {
- Directory dir1 = newDirectory();
- IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- Document d1 = new Document();
- d1.add(newField("f1", "v1", TextField.TYPE_STORED));
- d1.add(newField("f2", "v1", TextField.TYPE_STORED));
- w1.addDocument(d1);
- Document d2 = new Document();
- d2.add(newField("f1", "v2", TextField.TYPE_STORED));
- d2.add(newField("f2", "v2", TextField.TYPE_STORED));
- w1.addDocument(d2);
- w1.close();
- return dir1;
- }
-
- private Directory getDir2(Random random) throws IOException {
- Directory dir2 = newDirectory();
- IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- Document d3 = new Document();
- d3.add(newField("f3", "v1", TextField.TYPE_STORED));
- d3.add(newField("f4", "v1", TextField.TYPE_STORED));
- w2.addDocument(d3);
- Document d4 = new Document();
- d4.add(newField("f3", "v2", TextField.TYPE_STORED));
- d4.add(newField("f4", "v2", TextField.TYPE_STORED));
- w2.addDocument(d4);
- w2.close();
- return dir2;
- }
-
-}
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (revision 1242234)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (working copy)
@@ -30,7 +30,7 @@
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
/**
- * Some tests for {@link ParallelReader}s with empty indexes
+ * Some tests for {@link ParallelAtomicReader}s with empty indexes
*
* @author Christian Kohlschuetter
*/
@@ -52,9 +52,9 @@
Directory rdOut = newDirectory();
IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- ParallelReader pr = new ParallelReader();
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd1)));
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd2)));
+ ParallelAtomicReader pr = new ParallelAtomicReader(
+ SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd1)),
+ SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd2)));
// When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
iwOut.addIndexes(pr);
@@ -115,15 +115,20 @@
Directory rdOut = newDirectory();
IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- ParallelReader pr = new ParallelReader();
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd1)));
- pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd2)));
+ final DirectoryReader reader1, reader2;
+ ParallelAtomicReader pr = new ParallelAtomicReader(
+ SlowCompositeReaderWrapper.wrap(reader1 = DirectoryReader.open(rd1)),
+ SlowCompositeReaderWrapper.wrap(reader2 = DirectoryReader.open(rd2)));
// When unpatched, Lucene crashes here with an ArrayIndexOutOfBoundsException (caused by TermVectorsWriter)
iwOut.addIndexes(pr);
// ParallelReader closes any IndexReader you added to it:
pr.close();
+
+ // assert subreaders were closed
+ assertEquals(0, reader1.getRefCount());
+ assertEquals(0, reader2.getRefCount());
rd1.close();
rd2.close();
Index: lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java (revision 1242234)
+++ lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java (working copy)
@@ -72,9 +72,7 @@
}
public void test1() throws IOException {
- ParallelReader pr = new ParallelReader();
- pr.add(ir1);
- pr.add(ir2);
+ ParallelAtomicReader pr = new ParallelAtomicReader(ir1, ir2);
Bits liveDocs = pr.getLiveDocs();
Index: modules/facet/src/test/org/apache/lucene/facet/search/TestFacetsAccumulatorWithComplement.java
===================================================================
--- modules/facet/src/test/org/apache/lucene/facet/search/TestFacetsAccumulatorWithComplement.java (revision 1242234)
+++ modules/facet/src/test/org/apache/lucene/facet/search/TestFacetsAccumulatorWithComplement.java (working copy)
@@ -5,7 +5,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
-import org.apache.lucene.index.ParallelReader;
+import org.apache.lucene.index.ParallelAtomicReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
@@ -68,8 +68,7 @@
@Test
public void testComplementsWithParallerReader() throws Exception {
IndexReader origReader = indexReader;
- ParallelReader pr = new ParallelReader(true);
- pr.add(SlowCompositeReaderWrapper.wrap(origReader));
+ ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.wrap(origReader));
indexReader = pr;
try {
doTestComplements();