Index: lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java
===================================================================
--- lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java (revision 1479914)
+++ lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java (working copy)
@@ -88,13 +88,16 @@
         upto++;
       }
     }
-    assert upto == offsets.length;
+    //assert upto == offsets.length;
   }
 
   @Override
   public void visitDocument(int n, StoredFieldVisitor visitor, Set ignoreFields) throws IOException {
     in.seek(offsets[n]);
     readLine();
+    if (!StringHelper.startsWith(scratch, NUM)) {
+      return;
+    }
     assert StringHelper.startsWith(scratch, NUM);
     int numFields = parseIntAt(NUM.length);
 
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsIndexReader.java (working copy)
@@ -165,6 +165,9 @@
     if (docID < 0 || docID >= maxDoc) {
       throw new IllegalArgumentException("docID out of range [0-" + maxDoc + "]: " + docID);
     }
+    if (docBases.length == 0) {
+      return -1;
+    }
     final int block = block(docID);
     final int relativeChunk = relativeChunk(block, docID - docBases[block]);
     return startPointers[block] + relativeStartPointer(block, relativeChunk);
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingStoredFieldsReader.java (working copy)
@@ -194,7 +194,11 @@
 
   @Override
   public void visitDocument(int docID, StoredFieldVisitor visitor, Set ignoreFields) throws IOException {
-    fieldsStream.seek(indexReader.getStartPointer(docID));
+    long startPointer = indexReader.getStartPointer(docID);
+    if (startPointer < 0) {
+      return;
+    }
+    fieldsStream.seek(startPointer);
     final int docBase = fieldsStream.readVInt();
     final int chunkDocs = fieldsStream.readVInt();
Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java (working copy)
@@ -90,8 +90,9 @@
       assert HEADER_LENGTH_IDX == indexStream.getFilePointer();
       final long indexSize = indexStream.length() - HEADER_LENGTH_IDX;
       this.size = (int) (indexSize >> 3);
-      // Verify two sources of "maxDoc" agree:
-      if (this.size != si.getDocCount()) {
+      // Verify two sources of "maxDoc" agree, but for stacked segments allow
+      // less actual documents:
+      if (this.size > si.getDocCount()) {
         throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + this.size + " but segmentInfo shows " + si.getDocCount());
       }
       numTotalDocs = (int) (indexSize >> 3);
Index: lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (working copy)
@@ -470,7 +470,7 @@
 
   private synchronized boolean applyTermUpdates(
       SortedSet packetUpdates, ReadersAndLiveDocs rld,
-      SegmentReader reader, boolean checkDocId) throws IOException {
+      SegmentReader reader, boolean exactSegment) throws IOException {
     Fields fields = reader.fields();
     if (fields == null) {
       // This reader has no postings
@@ -479,18 +479,9 @@
 
     assert checkDeleteTerm(null);
 
-    UpdatedSegmentData updatedSegmentData = new UpdatedSegmentData();
+    UpdatedSegmentData updatedSegmentData = new UpdatedSegmentData(reader,
+        packetUpdates, exactSegment);
 
-    for (FieldsUpdate update : packetUpdates) {
-      DocsEnum docsEnum = reader.termDocsEnum(update.term);
-      if (docsEnum != null) {
-        int docID;
-        while ((docID = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-          updatedSegmentData.addUpdate(docID, update, checkDocId);
-        }
-      }
-    }
-
     if (updatedSegmentData.hasUpdates()) {
       rld.setLiveUpdates(updatedSegmentData);
       return true;
Index: lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -421,7 +421,7 @@
       // create new fields update, which should effect previous docs in the
       // current segment
       FieldsUpdate fieldsUpdate = new FieldsUpdate(term, operation, fields,
-          analyzer, numDocsInRAM.get() - 1);
+          analyzer, numDocsInRAM.get() - 1, System.currentTimeMillis());
       // invert the given fields and store in RAMDirectory
       dwpt.invertFieldsUpdate(fieldsUpdate, globalFieldNumberMap);
       dwpt.updateFields(term, fieldsUpdate);
@@ -480,6 +480,9 @@
     // abortable so that IW.close(false) is able to stop it
     TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(directory);
 
+    updates.startWriting(infoPerCommit.getNextUpdateGen(),
+        infoPerCommit.info.getDocCount());
+
     final List mergeReaders = new ArrayList();
     AtomicReader reader;
     while ((reader = updates.nextReader()) != null) { // add new indexes
@@ -489,10 +492,6 @@
     SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream,
         trackingDir, interval, MergeState.CheckAbort.NONE, globalFieldNumberMap,
         context);
-    updates.startWriting(infoPerCommit.getNextUpdateGen(),
-        infoPerCommit.info.getDocCount(), indexWriter.getConfig()
-            .getReaderTermsIndexDivisor());
-
     Set generationReplacementFilenames = null;
     boolean success = false;
     try {
Index: lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (working copy)
@@ -132,13 +132,11 @@
     final FieldInfos fieldInfos;
     final FrozenBufferedDeletes segmentDeletes;
     final MutableBits liveDocs;
-    final UpdatedSegmentData liveUpdates;
     final int delCount;
 
     private FlushedSegment(SegmentInfoPerCommit segmentInfo,
         FieldInfos fieldInfos, BufferedDeletes segmentDeletes, MutableBits liveDocs,
-        int delCount, BufferedUpdates segmentUpdates,
-        UpdatedSegmentData liveUpdates) {
+        int delCount, BufferedUpdates segmentUpdates) {
       this.segmentInfo = segmentInfo;
       this.fieldInfos = fieldInfos;
       if ((segmentDeletes != null && segmentDeletes.any())
@@ -149,7 +147,6 @@
         this.segmentDeletes = null;
       }
       this.liveDocs = liveDocs;
-      this.liveUpdates = liveUpdates;
       this.delCount = delCount;
     }
   }
@@ -618,9 +615,9 @@
 
     assert segmentInfo != null;
 
-      FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit, flushState.fieldInfos,
-          segmentDeletes, flushState.liveDocs, flushState.delCountOnFlush,
-          pendingUpdates, flushState.liveUpdates);
+      FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit,
+          flushState.fieldInfos, segmentDeletes, flushState.liveDocs,
+          flushState.delCountOnFlush, pendingUpdates);
       sealFlushedSegment(fs);
       doAfterFlush();
       success = true;
Index: lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/FieldsUpdate.java (working copy)
@@ -46,7 +46,8 @@
   final Operation operation;
   final Set replacedFields;
   final Analyzer analyzer;
-  final int docIDUpto;
+  final int docIdUpto;
+  final long timeStamp;
 
   IndexDocument fields;
   Directory directory;
@@ -67,7 +68,7 @@
    *          Document ID of the last document added before this field update
    */
   public FieldsUpdate(Term term, Operation operation, IndexDocument fields,
-      Analyzer analyzer, int docIDUpto) {
+      Analyzer analyzer, int docIDUpto, long timeStamp) {
     this.term = term;
     this.fields = fields;
     this.operation = operation;
@@ -83,7 +84,8 @@
       }
     }
     this.analyzer = analyzer;
-    this.docIDUpto = docIDUpto;
+    this.docIdUpto = docIDUpto;
+    this.timeStamp = timeStamp;
   }
 
   /**
@@ -97,7 +99,8 @@
     this.operation = other.operation;
     this.replacedFields = other.replacedFields;
     this.analyzer = other.analyzer;
-    this.docIDUpto = other.docIDUpto;
+    this.docIdUpto = other.docIdUpto;
+    this.timeStamp = other.timeStamp;
     this.directory = other.directory;
     this.segmentInfo = other.segmentInfo;
   }
@@ -105,7 +108,15 @@
 
   /* Order FrieldsUpdate by increasing docIDUpto */
   @Override
   public int compareTo(FieldsUpdate other) {
-    return this.docIDUpto - other.docIDUpto;
+    int diff = this.docIdUpto - other.docIdUpto;
+    if (diff == 0) {
+      if (this.timeStamp < other.timeStamp) {
+        return -1;
+      } else if (this.timeStamp > other.timeStamp) {
+        return 1;
+      }
+    }
+    return diff;
   }
 
 }
Index: lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (working copy)
@@ -19,9 +19,7 @@
 
 import java.io.IOException;
 import java.util.Comparator;
-import java.util.Iterator;
 import java.util.Map;
-import java.util.SortedSet;
 
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -367,13 +365,6 @@
       segDeletes = null;
     }
 
-    final Map<Term,SortedSet<FieldsUpdate>> segUpdates;
-    if (state.segUpdates != null && state.segUpdates.terms.size() > 0) {
-      segUpdates = state.segUpdates.terms;
-    } else {
-      segUpdates = null;
-    }
-
     final int[] termIDs = termsHashPerField.sortPostings(termComp);
     final int numTerms = termsHashPerField.bytesHash.size();
     final BytesRef text = new BytesRef();
@@ -406,8 +397,6 @@
 
       final PostingsConsumer postingsConsumer = termsConsumer.startTerm(text);
 
-      Term term = new Term(fieldName, text);
-
       final int delDocLimit;
       if (segDeletes != null) {
         protoTerm.bytes = text;
@@ -421,19 +410,6 @@
         delDocLimit = 0;
       }
 
-      final SortedSet termUpdates;
-      Iterator updatesIterator = null;
-      FieldsUpdate nextUpdate = null;
-      if (segUpdates != null) {
-        termUpdates = segUpdates.get(term);
-        if (termUpdates != null && !termUpdates.isEmpty()) {
-          updatesIterator = termUpdates.iterator();
-          nextUpdate = updatesIterator.next();
-        }
-      } else {
-        termUpdates = null;
-      }
-
       // Now termStates has numToMerge FieldMergeStates
       // which all share the same term.  Now we must
       // interleave the docID streams.
@@ -506,23 +482,6 @@
           }
         }
 
-        // make sure we update the relevant documents according to the doc ID
-        // in which the updates arrived
-        while (nextUpdate != null && docID > nextUpdate.docIDUpto) {
-          if (updatesIterator.hasNext()) {
-            nextUpdate = updatesIterator.next();
-          } else {
-            nextUpdate = null;
-          }
-        }
-
-        if (nextUpdate != null) {
-          if (state.liveUpdates == null) {
-            state.liveUpdates = new UpdatedSegmentData();
-          }
-          state.liveUpdates.addUpdate(docID, nextUpdate, true);
-        }
-
        totTF += termFreq;
 
         // Carefully copy over the prox + payload info,
Index: lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java (working copy)
@@ -60,25 +60,38 @@
   public FrozenBufferedDeletes(BufferedDeletes deletes,
       BufferedUpdates updates, boolean isSegmentPrivate) {
     this.isSegmentPrivate = isSegmentPrivate;
-    assert !isSegmentPrivate || deletes.terms.size() == 0 : "segment private package should only have del queries";
-    Term termsArray[] = deletes.terms.keySet().toArray(new Term[deletes.terms.size()]);
-    termCount = termsArray.length;
-    ArrayUtil.timSort(termsArray);
-    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
-    for (Term term : termsArray) {
-      builder.add(term);
+    int localBytesUsed = 0;
+    if (deletes != null) {
+      assert !isSegmentPrivate || deletes.terms.size() == 0 : "segment private package should only have del queries";
+      Term termsArray[] = deletes.terms.keySet().toArray(
+          new Term[deletes.terms.size()]);
+      termCount = termsArray.length;
+      ArrayUtil.timSort(termsArray);
+      PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
+      for (Term term : termsArray) {
+        builder.add(term);
+      }
+      terms = builder.finish();
+      localBytesUsed += (int) terms.getSizeInBytes();
+
+      queries = new Query[deletes.queries.size()];
+      queryLimits = new int[deletes.queries.size()];
+      int upto = 0;
+      for (Map.Entry ent : deletes.queries.entrySet()) {
+        queries[upto] = ent.getKey();
+        queryLimits[upto] = ent.getValue();
+        upto++;
+      }
+
+      localBytesUsed += queries.length * BYTES_PER_DEL_QUERY;
+      numTermDeletes = deletes.numTermDeletes.get();
+    } else {
+      terms = null;
+      numTermDeletes = 0;
+      queries = null;
+      queryLimits = null;
     }
-    terms = builder.finish();
-    queries = new Query[deletes.queries.size()];
-    queryLimits = new int[deletes.queries.size()];
-    int upto = 0;
-    for(Map.Entry ent : deletes.queries.entrySet()) {
-      queries[upto] = ent.getKey();
-      queryLimits[upto] = ent.getValue();
-      upto++;
-    }
-
     // freeze updates
     if (updates == null || updates.terms.isEmpty()) {
       allUpdates = null;
@@ -87,10 +100,10 @@
       for (SortedSet list : updates.terms.values()) {
         allUpdates.addAll(list);
       }
+      localBytesUsed += 100;
     }
-    bytesUsed = (int) terms.getSizeInBytes() + queries.length * BYTES_PER_DEL_QUERY + 100 /* updates */;
-    numTermDeletes = deletes.numTermDeletes.get();
+    bytesUsed = localBytesUsed;
   }
 
   public void setDelGen(long gen) {
Index: lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java (working copy)
@@ -67,9 +67,6 @@
    *  only set if there is one or more deleted documents. */
   public MutableBits liveDocs;
 
-  // Lazily created:
-  public UpdatedSegmentData liveUpdates;
-
   /** Unique suffix for any postings files written for this
    *  segment.  {@link PerFieldPostingsFormat} sets this for
    *  each of the postings formats it wraps.  If you create
Index: lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java (revision 1479914)
+++ lucene/core/src/java/org/apache/lucene/index/UpdatedSegmentData.java (working copy)
@@ -1,14 +1,19 @@
 package org.apache.lucene.index;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.PriorityQueue;
+import java.util.SortedSet;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.Bits;
@@ -36,44 +41,82 @@
 class UpdatedSegmentData {
 
   static final FieldInfos EMPTY_FIELD_INFOS = new FieldInfos(new FieldInfo[0]);
-
+  /** Updates mapped by doc ID, for each do sorted list of updates. */
-  private TreeMap<Integer,PriorityQueue<FieldsUpdate>> updatesMap;
+  private TreeMap<Integer,TreeSet<FieldsUpdate>> docIdToUpdatesMap;
+  private HashMap<FieldsUpdate,List<Integer>> updatesToDocIdMap;
+  private LinkedHashMap allApplied;
 
-  /** */
   private long generation;
+  private boolean exactSegment;
 
-  private Map fieldGenerationReplacments = new HashMap();
+  private Map fieldGenerationReplacments;
 
-  private Iterator<Entry<Integer,PriorityQueue<FieldsUpdate>>> updatesIterator;
+  private Iterator<Entry<Integer,TreeSet<FieldsUpdate>>> updatesIterator;
   private int currDocID;
   private int nextDocID;
   private int numDocs;
-  private PriorityQueue nextUpdate;
+  private TreeSet nextUpdate;
   private Analyzer analyzer;
-  private int termsIndexDivisor;
-
-  UpdatedSegmentData() {
-    updatesMap = new TreeMap<Integer,PriorityQueue<FieldsUpdate>>();
+  UpdatedSegmentData(SegmentReader reader,
+      SortedSet packetUpdates, boolean exactSegment)
+      throws IOException {
+    docIdToUpdatesMap = new TreeMap<>();
+    updatesToDocIdMap = new HashMap<>();
+    this.exactSegment = exactSegment;
+
+    allApplied = new LinkedHashMap<>();
+
+    for (FieldsUpdate update : packetUpdates) {
+      DocsEnum docsEnum = reader.termDocsEnum(update.term);
+      if (docsEnum != null) {
+        int docId;
+        while ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+          addUpdate(docId, update);
+        }
+      }
+
+      // try applying on previous updates in this packet
+      for (Entry applied : allApplied
+          .entrySet()) {
+        DocsEnum termDocsEnum = applied.getValue().termDocsEnum(update.term);
+        if (termDocsEnum != null
+            && termDocsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+          List list = updatesToDocIdMap.get(applied.getKey());
+          if (list != null) {
+            for (Integer docId : list) {
+              addUpdate(docId, update);
+            }
+          }
+        }
+      }
+      allApplied.put(update, new UpdateAtomicReader(update.directory,
+          update.segmentInfo, IOContext.DEFAULT));
+    }
+  }
 
-  void addUpdate(int docId, FieldsUpdate fieldsUpdate, boolean checkDocId) {
-    if (checkDocId && docId > fieldsUpdate.docIDUpto) {
+  private void addUpdate(int docId, FieldsUpdate fieldsUpdate) {
+    if (exactSegment && docId > fieldsUpdate.docIdUpto) {
       return;
     }
-    PriorityQueue prevUpdates = updatesMap.get(docId);
+    TreeSet prevUpdates = docIdToUpdatesMap.get(docId);
     if (prevUpdates == null) {
-      prevUpdates = new PriorityQueue();
-      updatesMap.put(docId, prevUpdates);
-    } else {
-      System.out.println();
+      prevUpdates = new TreeSet();
+      docIdToUpdatesMap.put(docId, prevUpdates);
    }
     prevUpdates.add(fieldsUpdate);
+    List prevDocIds = updatesToDocIdMap.get(fieldsUpdate);
+    if (prevDocIds == null) {
+      prevDocIds = new ArrayList();
+      updatesToDocIdMap.put(fieldsUpdate, prevDocIds);
+    }
+    prevDocIds.add(docId);
   }
 
   boolean hasUpdates() {
-    return !updatesMap.isEmpty();
+    return !docIdToUpdatesMap.isEmpty();
   }
 
   /**
@@ -83,16 +126,13 @@
    *          The updates generation.
    * @param numDocs
    *          number of documents in the base segment
-   * @param termsIndexDivisor
-   *          Terms index divisor to use in temporary segments
    */
-  void startWriting(long generation, int numDocs, int termsIndexDivisor) {
+  void startWriting(long generation, int numDocs) {
     this.generation = generation;
    this.numDocs = numDocs;
-    this.termsIndexDivisor = termsIndexDivisor;
-    updatesIterator = updatesMap.entrySet().iterator();
+
+    updatesIterator = docIdToUpdatesMap.entrySet().iterator();
     currDocID = 0;
-    fieldGenerationReplacments.clear();
     // fetch the first actual updates document if exists
     nextDocUpdate();
   }
@@ -102,8 +142,7 @@
    */
   private void nextDocUpdate() {
     if (updatesIterator.hasNext()) {
-      Entry<Integer,PriorityQueue<FieldsUpdate>> docUpdates = updatesIterator
-          .next();
+      Entry<Integer,TreeSet<FieldsUpdate>> docUpdates = updatesIterator.next();
       nextDocID = docUpdates.getKey();
       nextUpdate = docUpdates.getValue();
     } else {
@@ -128,9 +167,8 @@
       currDocID = nextDocID;
     } else if (currDocID < numDocs) {
       // get the an actual updates reader...
-      FieldsUpdate update = nextUpdate.poll();
-      toReturn = new UpdateAtomicReader(update.directory, update.segmentInfo,
-          IOContext.DEFAULT);
+      FieldsUpdate update = nextUpdate.pollFirst();
+      toReturn = allApplied.get(update);
 
       // ... and if done for this document remove from updates map
       if (nextUpdate.isEmpty()) {
@@ -139,6 +177,9 @@
 
       // add generation replacements if exist
       if (update.replacedFields != null) {
+        if (fieldGenerationReplacments == null) {
+          fieldGenerationReplacments = new HashMap();
+        }
         for (String fieldName : update.replacedFields) {
           FieldGenerationReplacements fieldReplacement = fieldGenerationReplacments
               .get(fieldName);
@@ -158,9 +199,9 @@
   }
 
   boolean isEmpty() {
-    return updatesMap.isEmpty();
+    return docIdToUpdatesMap.isEmpty();
   }
-
+
   private class UpdateAtomicReader extends AtomicReader {
 
     final private SegmentCoreReaders core;
@@ -180,8 +221,7 @@
      */
     UpdateAtomicReader(Directory fieldsDir, SegmentInfo segmentInfo,
         IOContext context) throws IOException {
-      core = new SegmentCoreReaders(null, segmentInfo, -1, context,
-          termsIndexDivisor);
+      core = new SegmentCoreReaders(null, segmentInfo, -1, context, -1);
       numDocs = 1;
     }
 
@@ -247,8 +287,13 @@
     }
 
     @Override
-    protected void doClose() throws IOException {}
-
+    protected void doClose() throws IOException {
+      if (core == null) {
+        return;
+      }
+      core.decRef();
+    }
+
     @Override
     public NumericDocValues getNumericDocValues(String field)
         throws IOException {
@@ -257,7 +302,7 @@
       }
       return core.getNumericDocValues(field);
     }
-
+
     @Override
     public BinaryDocValues getBinaryDocValues(String field) throws IOException {
       if (core == null) {
@@ -265,7 +310,7 @@
       }
       return core.getBinaryDocValues(field);
     }
-
+
     @Override
     public SortedDocValues getSortedDocValues(String field) throws IOException {
       if (core == null) {
@@ -273,7 +318,7 @@
      }
       return core.getSortedDocValues(field);
     }
-
+
     @Override
     public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
       if (core == null) {
@@ -282,7 +327,7 @@
       }
       return core.getSortedSetDocValues(field);
     }
-
+
     @Override
     public NumericDocValues getNormValues(String field) throws IOException {
       if (core == null) {
@@ -290,6 +335,6 @@
       }
       return core.getNormValues(field);
     }
-  }
+  }
 }
Index: lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java (revision 1479914)
+++ lucene/core/src/test/org/apache/lucene/index/TestFieldReplacements.java (working copy)
@@ -132,17 +132,12 @@
   }
 
   private static void addDocuments(Directory directory, Random localRandom,
-      int maxDocs, boolean randomConfig) throws IOException {
+      int maxDocs) throws IOException {
     init(localRandom);
     HashSet usedTerms = new HashSet();
 
-    final IndexWriterConfig config;
-    if (randomConfig) {
-      config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    } else {
-      config = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
-    }
-    System.out.println(config.getMergePolicy());
+    final IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
+        new MockAnalyzer(random()));
     config.setCodec(new SimpleTextCodec());
     IndexWriter writer = new IndexWriter(directory, config);
@@ -273,13 +268,13 @@
   }
 
   public void testRandomIndexGeneration() throws IOException {
-    addDocuments(dir, random(), Integer.MAX_VALUE, true);
+    addDocuments(dir, random(), Integer.MAX_VALUE);
     DirectoryReader directoryReader = DirectoryReader.open(dir);
     directoryReader.close();
   }
 
   public void testAddIndexes() throws IOException {
-    addDocuments(dir, random(), Integer.MAX_VALUE, true);
+    addDocuments(dir, random(), Integer.MAX_VALUE);
     RAMDirectory addedDir = new RAMDirectory();
     IndexWriter addedIndexWriter = new IndexWriter(addedDir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
@@ -300,13 +295,10 @@
 
   public void testIndexEquality() throws IOException {
     // create index through updates
-    addDocuments(dir, new Random(3), Integer.MAX_VALUE, true);
+    addDocuments(dir, new Random(3), Integer.MAX_VALUE);
     DirectoryReader updatesReader = DirectoryReader.open(dir);
     IndexData updatesIndexData = new IndexData(updatesReader);
-    System.out.println("Updates index data");
-    System.out.println(updatesIndexData.toString(false));
-    System.out.println();
     updatesReader.close();
 
     // create the same index directly
@@ -598,9 +590,6 @@
 
     DirectoryReader directReader = DirectoryReader.open(directDir);
     IndexData directIndexData = new IndexData(directReader);
-    System.out.println("Direct index data");
-    System.out.println(directIndexData.toString(false));
-    System.out.println();
     directReader.close();
     directDir.close();
@@ -808,7 +797,7 @@
   }
 
   public void testprintIndexes() throws IOException {
-    File outDir = new File("D:/temp/ifu/compare/scenario/a");
+    File outDir = new File("D:/temp/ifu/compare/scenario/b");
     outDir.mkdirs();
 
     for (int i = 0; i < 42; i++) {
@@ -819,8 +808,7 @@
       for (String filename : directory.listAll()) {
        new File(fsDirFile, filename).delete();
       }
-      System.out.print("" + i + " ");
-      addDocuments(directory, new Random(3), i, true);
+      addDocuments(directory, new Random(3), i);
       DirectoryReader updatesReader = DirectoryReader.open(directory);
       IndexData updatesIndexData = new IndexData(updatesReader);
       updatesReader.close();