Index: lucene/src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestParallelReader.java	(revision 1054993)
+++ lucene/src/test/org/apache/lucene/index/TestParallelReader.java	(working copy)
@@ -222,6 +222,28 @@
     dir1.close();
     dir2.close();
   }
+
+  public void testSequentialSubReaders() throws IOException {
+    Directory dir1 = getDir1(random);
+    Directory dir2 = getDir2(random);
+    ParallelReader pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    IndexReader[] seqSubReaders = pr.getSequentialSubReaders();
+    assertNotNull(seqSubReaders);
+    pr.close();
+    dir1.close();
+    dir2.close();
+    dir1 = getDirMultiSeg(10, 1);
+    dir2 = getDirMultiSeg(10, 2);
+    pr = new ParallelReader();
+    pr.add(IndexReader.open(dir1, false));
+    pr.add(IndexReader.open(dir2, false));
+    assertNull(pr.getSequentialSubReaders());
+    pr.close();
+    dir1.close();
+    dir2.close();
+  }
 
   private void queryTest(Query query) throws IOException {
     ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
@@ -298,5 +320,24 @@
     w2.close();
     return dir2;
   }
+
+  private Directory getDirMultiSeg(int docs, int segs) throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
+    int docsPerSeg = docs / segs;
+    if (docsPerSeg < 2) docsPerSeg = 2;
+    cfg.setMaxBufferedDocs(docsPerSeg);
+    ((LogMergePolicy)cfg.getMergePolicy()).setMaxMergeDocs(docsPerSeg);
+    IndexWriter w = new IndexWriter(dir, cfg);
+    for (int i = 1; i <= docs; i++) {
+      Document d = new Document();
+      d.add(newField("f1", "v" + i, Field.Store.YES, Field.Index.ANALYZED));
+      d.add(newField("f2", "v" + i, Field.Store.YES, Field.Index.ANALYZED));
+      w.addDocument(d);
+    }
+    w.optimize(segs);
+    w.close();
+    return dir;
+  }
 }
Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ParallelReader.java	(revision 1054993)
+++ lucene/src/java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -57,6 +57,9 @@
   private int maxDoc;
   private int numDocs;
   private boolean hasDeletions;
+  private boolean syncedSubReaders = true;
+  private int[] docsPerSegment;
+  private ParallelReader[] seqSubReaders = null;
 
   private ParallelFields fields = new ParallelFields();
 
@@ -115,6 +118,15 @@
       this.maxDoc = reader.maxDoc();
       this.numDocs = reader.numDocs();
       this.hasDeletions = reader.hasDeletions();
+      IndexReader[] subReaders = reader.getSequentialSubReaders();
+      if (subReaders == null) {
+        syncedSubReaders = false;
+      } else {
+        docsPerSegment = new int[subReaders.length];
+        for (int i = 0; i < subReaders.length; i++) {
+          docsPerSegment[i] = subReaders[i].maxDoc();
+        }
+      }
     }
 
     if (reader.maxDoc() != maxDoc)                             // check compatibility
@@ -123,7 +135,28 @@
     if (reader.numDocs() != numDocs)
       throw new IllegalArgumentException
         ("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
-
+
+    if (syncedSubReaders && readers.size() > 0) {
+      IndexReader[] subReaders = reader.getSequentialSubReaders();
+      if (subReaders == null || subReaders.length != docsPerSegment.length) {
+        syncedSubReaders = false;
+      } else {
+        // XXX should we check deletedDocs here as well?
+        for (int i = 0; i < subReaders.length; i++) {
+          if (subReaders[i].maxDoc() != docsPerSegment[i]) {
+            syncedSubReaders = false;
+            break;
+          }
+        }
+      }
+    }
+    // reset sub readers
+    if (seqSubReaders != null) {
+      for (IndexReader r : seqSubReaders) {
+        r.close();
+      }
+      seqSubReaders = null;
+    }
     Collection<String> fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
     readerToFields.put(reader, fields);
     for (final String field : fields) {                        // update fieldToReader map
@@ -486,6 +519,35 @@
     return true;
   }
 
+  @Override
+  public IndexReader[] getSequentialSubReaders() {
+    if (!syncedSubReaders || readers.size() == 0) {
+      return null;
+    }
+    if (seqSubReaders != null) {
+      return seqSubReaders;
+    }
+    seqSubReaders = new ParallelReader[docsPerSegment.length];
+    for (int i = 0; i < seqSubReaders.length; i++) {
+      try {
+        seqSubReaders[i] = new ParallelReader(false);
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+    for (int i = 0; i < readers.size(); i++) {
+      IndexReader[] subReaders = readers.get(i).getSequentialSubReaders();
+      boolean withStoredFields = storedFieldReaders.contains(readers.get(i));
+      for (int j = 0; j < subReaders.length; j++) {
+        try {
+          seqSubReaders[j].add(subReaders[j], withStoredFields);
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+    return seqSubReaders;
+  }
   /** Not implemented.
    * @throws UnsupportedOperationException
    */
@@ -508,6 +570,11 @@
 
   @Override
   protected synchronized void doClose() throws IOException {
+    if (seqSubReaders != null) {
+      for (IndexReader r : seqSubReaders) {
+        r.close();
+      }
+    }
    for (int i = 0; i < readers.size(); i++) {
      if (decrefOnClose.get(i).booleanValue()) {
        readers.get(i).decRef();
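
Usage note (not part of the patch): below is a minimal sketch of how a caller might exercise the new ParallelReader.getSequentialSubReaders() once this patch is applied, written against the Lucene 3.x API. The class name, index paths, and the per-segment loop are hypothetical illustrations; the key assumption, per the patch, is that both indexes have the same per-segment maxDoc layout, otherwise the method returns null and the reader must be used as a single atomic reader.

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ParallelReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

// Hypothetical demo class, not part of the patch.
public class SeqSubReadersDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical index locations; both indexes are assumed to have been
    // written with identical segment structure (same maxDoc per segment).
    Directory dir1 = FSDirectory.open(new File("/path/to/index1"));
    Directory dir2 = FSDirectory.open(new File("/path/to/index2"));

    ParallelReader pr = new ParallelReader(); // closes sub readers on close()
    pr.add(IndexReader.open(dir1, true));     // read-only readers
    pr.add(IndexReader.open(dir2, true));

    IndexReader[] subs = pr.getSequentialSubReaders();
    if (subs != null) {
      // Segments line up: each element is a per-segment ParallelReader.
      for (IndexReader sub : subs) {
        System.out.println("segment maxDoc=" + sub.maxDoc());
      }
    } else {
      // Segment layouts differ; fall back to using pr atomically.
      System.out.println("sub readers not aligned; maxDoc=" + pr.maxDoc());
    }

    // The per-segment readers are owned by pr and are closed by pr.close()
    // (see the doClose() change above); do not close them individually.
    pr.close();
    dir1.close();
    dir2.close();
  }
}

Per the patch, getSequentialSubReaders() lazily builds one ParallelReader per aligned segment and caches the array, and a subsequent add() closes and resets that cache, so the returned array should not be held across further add() calls.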