files = toSync.files(directory, false);
+ for(final String fileName: files) {
+ assert directory.fileExists(fileName): "file " + fileName + " does not exist";
+ }
}
assert testPoint("midStartCommit");
@@ -5010,10 +4569,10 @@
* Sets the {@link PayloadProcessorProvider} to use when merging payloads.
* Note that the given pcp will be invoked for every segment that
* is merged, not only external ones that are given through
- * {@link IndexWriter#addIndexes} or {@link IndexWriter#addIndexesNoOptimize}.
- * If you want only the payloads of the external segments to be processed, you
- * can return null whenever a {@link DirPayloadProcessor} is
- * requested for the {@link Directory} of the {@link IndexWriter}.
+ * {@link #addIndexes}. If you want only the payloads of the external segments
+ * to be processed, you can return null whenever a
+ * {@link DirPayloadProcessor} is requested for the {@link Directory} of the
+ * {@link IndexWriter}.
*
* The default is null which means payloads are processed
* normally (copied) during segment merges. You can also unset it by passing
Index: lucene/src/java/org/apache/lucene/index/NormsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/NormsWriter.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/NormsWriter.java (working copy)
@@ -88,7 +88,7 @@
}
}
- final String normsFileName = IndexFileNames.segmentFileName(state.segmentName, IndexFileNames.NORMS_EXTENSION);
+ final String normsFileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.NORMS_EXTENSION);
state.flushedFiles.add(normsFileName);
IndexOutput normsOut = state.directory.createOutput(normsFileName);
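This is the mechanical change repeated throughout the patch: IndexFileNames.segmentFileName gains a middle suffix argument, and passing "" reproduces the old two-argument file names. A small sketch of the assumed resolution:

    // With an empty suffix the result matches the old 2-arg form:
    String name = IndexFileNames.segmentFileName("_1", "", IndexFileNames.NORMS_EXTENSION);
    assert name.equals("_1.nrm");
    // A non-empty middle argument is assumed to inject a suffix between the
    // segment name and the extension.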
Index: lucene/src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentInfo.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/SegmentInfo.java (working copy)
@@ -490,7 +490,7 @@
} else if (isCompoundFile == YES) {
return true;
} else {
- return dir.fileExists(IndexFileNames.segmentFileName(name, IndexFileNames.COMPOUND_FILE_EXTENSION));
+ return dir.fileExists(IndexFileNames.segmentFileName(name, "", IndexFileNames.COMPOUND_FILE_EXTENSION));
}
}
@@ -537,6 +537,7 @@
docStoreOffset = offset;
docStoreSegment = segment;
docStoreIsCompoundFile = isCompoundFile;
+ clearFiles();
}
/**
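The clearFiles() call added above presumably invalidates SegmentInfo's cached file list, so the next files() call recomputes it against the new doc-store settings. A sketch of the invalidation idea; the field names are assumptions, not taken from this patch:

    // Hypothetical shape of the cache that setDocStore must invalidate:
    private List<String> files;     // lazily filled by files()
    private long sizeInBytes = -1;  // derived from the file list

    private void clearFiles() {
      files = null;       // force files() to rebuild from current state
      sizeInBytes = -1;   // stale once the file list changes
    }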
@@ -615,10 +616,10 @@
boolean useCompoundFile = getUseCompoundFile();
if (useCompoundFile) {
- fileSet.add(IndexFileNames.segmentFileName(name, IndexFileNames.COMPOUND_FILE_EXTENSION));
+ fileSet.add(IndexFileNames.segmentFileName(name, "", IndexFileNames.COMPOUND_FILE_EXTENSION));
} else {
for(String ext : IndexFileNames.NON_STORE_INDEX_EXTENSIONS) {
- addIfExists(fileSet, IndexFileNames.segmentFileName(name, ext));
+ addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", ext));
}
codec.files(dir, this, fileSet);
}
@@ -628,14 +629,14 @@
// vectors) with other segments
assert docStoreSegment != null;
if (docStoreIsCompoundFile) {
- fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
+ fileSet.add(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION));
} else {
for (String ext : IndexFileNames.STORE_INDEX_EXTENSIONS)
- addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, ext));
+ addIfExists(fileSet, IndexFileNames.segmentFileName(docStoreSegment, "", ext));
}
} else if (!useCompoundFile) {
for (String ext : IndexFileNames.STORE_INDEX_EXTENSIONS)
- addIfExists(fileSet, IndexFileNames.segmentFileName(name, ext));
+ addIfExists(fileSet, IndexFileNames.segmentFileName(name, "", ext));
}
String delFileName = IndexFileNames.fileNameFromGeneration(name, IndexFileNames.DELETES_EXTENSION, delGen);
@@ -654,7 +655,7 @@
// No separate norms but maybe plain norms
// in the non compound file case:
if (!hasSingleNormFile && !useCompoundFile) {
- String fileName = IndexFileNames.segmentFileName(name, IndexFileNames.PLAIN_NORMS_EXTENSION + i);
+ String fileName = IndexFileNames.segmentFileName(name, "", IndexFileNames.PLAIN_NORMS_EXTENSION + i);
if (dir.fileExists(fileName)) {
fileSet.add(fileName);
}
@@ -663,9 +664,9 @@
// Pre-2.1: we have to check file existence
String fileName = null;
if (useCompoundFile) {
- fileName = IndexFileNames.segmentFileName(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + i);
+ fileName = IndexFileNames.segmentFileName(name, "", IndexFileNames.SEPARATE_NORMS_EXTENSION + i);
} else if (!hasSingleNormFile) {
- fileName = IndexFileNames.segmentFileName(name, IndexFileNames.PLAIN_NORMS_EXTENSION + i);
+ fileName = IndexFileNames.segmentFileName(name, "", IndexFileNames.PLAIN_NORMS_EXTENSION + i);
}
if (fileName != null && dir.fileExists(fileName)) {
fileSet.add(fileName);
@@ -677,9 +678,9 @@
// matching _X.sN/_X.fN files for our segment:
String prefix;
if (useCompoundFile) {
- prefix = IndexFileNames.segmentFileName(name, IndexFileNames.SEPARATE_NORMS_EXTENSION);
+ prefix = IndexFileNames.segmentFileName(name, "", IndexFileNames.SEPARATE_NORMS_EXTENSION);
} else {
- prefix = IndexFileNames.segmentFileName(name, IndexFileNames.PLAIN_NORMS_EXTENSION);
+ prefix = IndexFileNames.segmentFileName(name, "", IndexFileNames.PLAIN_NORMS_EXTENSION);
}
final String pattern = prefix + "\\d+";
Index: lucene/src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentInfos.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/SegmentInfos.java (working copy)
@@ -935,15 +935,6 @@
lastGeneration = other.lastGeneration;
}
- // Used only for testing
- public boolean hasExternalSegments(Directory dir) {
- final int numSegments = size();
- for(int i=0;i<numSegments;i++)
- if (info(i).dir != dir)
- return true;
- return false;
- }
Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (working copy)
public static void files(Directory dir, SegmentInfo info, Collection<String> files) throws IOException {
- files.add(IndexFileNames.segmentFileName(info.name, PreFlexCodec.TERMS_EXTENSION));
- files.add(IndexFileNames.segmentFileName(info.name, PreFlexCodec.TERMS_INDEX_EXTENSION));
- files.add(IndexFileNames.segmentFileName(info.name, PreFlexCodec.FREQ_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.TERMS_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.TERMS_INDEX_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.FREQ_EXTENSION));
if (info.getHasProx()) {
// LUCENE-1739: for certain versions of 2.9-dev,
// hasProx would be incorrectly computed during
// indexing as true, and then stored into the segments
// file, when it should have been false. So we do the
// extra check, here:
- final String prx = IndexFileNames.segmentFileName(info.name, PreFlexCodec.PROX_EXTENSION);
+ final String prx = IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.PROX_EXTENSION);
if (dir.fileExists(prx)) {
files.add(prx);
}
@@ -145,7 +145,7 @@
// to CFS
if (!(dir instanceof CompoundFileReader)) {
- dir0 = cfsReader = new CompoundFileReader(dir, IndexFileNames.segmentFileName(si.name, IndexFileNames.COMPOUND_FILE_EXTENSION), readBufferSize);
+ dir0 = cfsReader = new CompoundFileReader(dir, IndexFileNames.segmentFileName(si.name, "", IndexFileNames.COMPOUND_FILE_EXTENSION), readBufferSize);
} else {
dir0 = dir;
}
Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java (working copy)
@@ -102,7 +102,7 @@
segment = seg;
fieldInfos = fis;
- origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, PreFlexCodec.TERMS_EXTENSION),
+ origEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_EXTENSION),
readBufferSize), fieldInfos, false);
size = origEnum.size;
@@ -110,7 +110,7 @@
if (indexDivisor != -1) {
// Load terms index
totalIndexInterval = origEnum.indexInterval * indexDivisor;
- final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, PreFlexCodec.TERMS_INDEX_EXTENSION),
+ final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(IndexFileNames.segmentFileName(segment, "", PreFlexCodec.TERMS_INDEX_EXTENSION),
readBufferSize), fieldInfos, true);
try {
Index: lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsReaderImpl.java (working copy)
@@ -59,15 +59,15 @@
boolean success = false;
try {
- final String docFileName = IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.DOC_EXTENSION);
+ final String docFileName = IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.DOC_EXTENSION);
docIn = intFactory.openInput(dir, docFileName);
- skipIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.SKIP_EXTENSION), readBufferSize);
+ skipIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.SKIP_EXTENSION), readBufferSize);
if (segmentInfo.getHasProx()) {
- freqIn = intFactory.openInput(dir, IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.FREQ_EXTENSION));
- posIn = intFactory.openInput(dir, IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.POS_EXTENSION), readBufferSize);
- payloadIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.PAYLOAD_EXTENSION), readBufferSize);
+ freqIn = intFactory.openInput(dir, IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.FREQ_EXTENSION));
+ posIn = intFactory.openInput(dir, IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.POS_EXTENSION), readBufferSize);
+ payloadIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.PAYLOAD_EXTENSION), readBufferSize);
} else {
posIn = null;
payloadIn = null;
@@ -82,13 +82,13 @@
}
public static void files(SegmentInfo segmentInfo, Collection<String> files) {
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.DOC_EXTENSION));
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.SKIP_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.DOC_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.SKIP_EXTENSION));
if (segmentInfo.getHasProx()) {
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.FREQ_EXTENSION));
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.POS_EXTENSION));
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, SepCodec.PAYLOAD_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.FREQ_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.POS_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", SepCodec.PAYLOAD_EXTENSION));
}
}
Index: lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/sep/SepPostingsWriterImpl.java (working copy)
@@ -76,24 +76,24 @@
public SepPostingsWriterImpl(SegmentWriteState state, IntStreamFactory factory) throws IOException {
super();
- final String docFileName = IndexFileNames.segmentFileName(state.segmentName, SepCodec.DOC_EXTENSION);
+ final String docFileName = IndexFileNames.segmentFileName(state.segmentName, "", SepCodec.DOC_EXTENSION);
state.flushedFiles.add(docFileName);
docOut = factory.createOutput(state.directory, docFileName);
docIndex = docOut.index();
if (state.fieldInfos.hasProx()) {
- final String frqFileName = IndexFileNames.segmentFileName(state.segmentName, SepCodec.FREQ_EXTENSION);
+ final String frqFileName = IndexFileNames.segmentFileName(state.segmentName, "", SepCodec.FREQ_EXTENSION);
state.flushedFiles.add(frqFileName);
freqOut = factory.createOutput(state.directory, frqFileName);
freqIndex = freqOut.index();
- final String posFileName = IndexFileNames.segmentFileName(state.segmentName, SepCodec.POS_EXTENSION);
+ final String posFileName = IndexFileNames.segmentFileName(state.segmentName, "", SepCodec.POS_EXTENSION);
posOut = factory.createOutput(state.directory, posFileName);
state.flushedFiles.add(posFileName);
posIndex = posOut.index();
// TODO: -- only if at least one field stores payloads?
- final String payloadFileName = IndexFileNames.segmentFileName(state.segmentName, SepCodec.PAYLOAD_EXTENSION);
+ final String payloadFileName = IndexFileNames.segmentFileName(state.segmentName, "", SepCodec.PAYLOAD_EXTENSION);
state.flushedFiles.add(payloadFileName);
payloadOut = state.directory.createOutput(payloadFileName);
@@ -105,7 +105,7 @@
payloadOut = null;
}
- final String skipFileName = IndexFileNames.segmentFileName(state.segmentName, SepCodec.SKIP_EXTENSION);
+ final String skipFileName = IndexFileNames.segmentFileName(state.segmentName, "", SepCodec.SKIP_EXTENSION);
state.flushedFiles.add(skipFileName);
skipOut = state.directory.createOutput(skipFileName);
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexReader.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexReader.java (working copy)
@@ -90,7 +90,7 @@
this.termComp = termComp;
- IndexInput in = dir.openInput(IndexFileNames.segmentFileName(segment, StandardCodec.TERMS_INDEX_EXTENSION));
+ IndexInput in = dir.openInput(IndexFileNames.segmentFileName(segment, "", StandardCodec.TERMS_INDEX_EXTENSION));
boolean success = false;
@@ -421,7 +421,7 @@
}
public static void files(Directory dir, SegmentInfo info, Collection<String> files) {
- files.add(IndexFileNames.segmentFileName(info.name, StandardCodec.TERMS_INDEX_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(info.name, "", StandardCodec.TERMS_INDEX_EXTENSION));
}
public static void getIndexExtensions(Collection<String> extensions) {
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexWriter.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/SimpleStandardTermsIndexWriter.java (working copy)
@@ -46,7 +46,7 @@
private IndexOutput termsOut;
public SimpleStandardTermsIndexWriter(SegmentWriteState state) throws IOException {
- final String indexFileName = IndexFileNames.segmentFileName(state.segmentName, StandardCodec.TERMS_INDEX_EXTENSION);
+ final String indexFileName = IndexFileNames.segmentFileName(state.segmentName, "", StandardCodec.TERMS_INDEX_EXTENSION);
state.flushedFiles.add(indexFileName);
termIndexInterval = state.termIndexInterval;
out = state.directory.createOutput(indexFileName);
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReaderImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReaderImpl.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReaderImpl.java (working copy)
@@ -44,12 +44,12 @@
int maxSkipLevels;
public StandardPostingsReaderImpl(Directory dir, SegmentInfo segmentInfo, int readBufferSize) throws IOException {
- freqIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, StandardCodec.FREQ_EXTENSION),
+ freqIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, "", StandardCodec.FREQ_EXTENSION),
readBufferSize);
if (segmentInfo.getHasProx()) {
boolean success = false;
try {
- proxIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, StandardCodec.PROX_EXTENSION),
+ proxIn = dir.openInput(IndexFileNames.segmentFileName(segmentInfo.name, "", StandardCodec.PROX_EXTENSION),
readBufferSize);
success = true;
} finally {
@@ -63,9 +63,9 @@
}
public static void files(Directory dir, SegmentInfo segmentInfo, Collection<String> files) throws IOException {
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, StandardCodec.FREQ_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", StandardCodec.FREQ_EXTENSION));
if (segmentInfo.getHasProx()) {
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, StandardCodec.PROX_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", StandardCodec.PROX_EXTENSION));
}
}
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriterImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriterImpl.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsWriterImpl.java (working copy)
@@ -59,14 +59,14 @@
public StandardPostingsWriterImpl(SegmentWriteState state) throws IOException {
super();
- String fileName = IndexFileNames.segmentFileName(state.segmentName, StandardCodec.FREQ_EXTENSION);
+ String fileName = IndexFileNames.segmentFileName(state.segmentName, "", StandardCodec.FREQ_EXTENSION);
state.flushedFiles.add(fileName);
freqOut = state.directory.createOutput(fileName);
if (state.fieldInfos.hasProx()) {
// At least one field does not omit TF, so create the
// prox file
- fileName = IndexFileNames.segmentFileName(state.segmentName, StandardCodec.PROX_EXTENSION);
+ fileName = IndexFileNames.segmentFileName(state.segmentName, "", StandardCodec.PROX_EXTENSION);
state.flushedFiles.add(fileName);
proxOut = state.directory.createOutput(fileName);
} else {
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictReader.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictReader.java (working copy)
@@ -111,7 +111,7 @@
this.termComp = termComp;
- in = dir.openInput(IndexFileNames.segmentFileName(segment, StandardCodec.TERMS_EXTENSION),
+ in = dir.openInput(IndexFileNames.segmentFileName(segment, "", StandardCodec.TERMS_EXTENSION),
readBufferSize);
boolean success = false;
@@ -186,7 +186,7 @@
}
public static void files(Directory dir, SegmentInfo segmentInfo, Collection<String> files) {
- files.add(IndexFileNames.segmentFileName(segmentInfo.name, StandardCodec.TERMS_EXTENSION));
+ files.add(IndexFileNames.segmentFileName(segmentInfo.name, "", StandardCodec.TERMS_EXTENSION));
}
public static void getExtensions(Collection<String> extensions) {
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictWriter.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/StandardTermsDictWriter.java (working copy)
@@ -64,7 +64,7 @@
private final Comparator<BytesRef> termComp;
public StandardTermsDictWriter(StandardTermsIndexWriter indexWriter, SegmentWriteState state, StandardPostingsWriter postingsWriter, Comparator<BytesRef> termComp) throws IOException {
- final String termsFileName = IndexFileNames.segmentFileName(state.segmentName, StandardCodec.TERMS_EXTENSION);
+ final String termsFileName = IndexFileNames.segmentFileName(state.segmentName, "", StandardCodec.TERMS_EXTENSION);
this.indexWriter = indexWriter;
this.termComp = termComp;
out = state.directory.createOutput(termsFileName);
Index: lucene/src/java/org/apache/lucene/store/Directory.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/Directory.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/store/Directory.java (working copy)
@@ -22,7 +22,6 @@
import java.io.Closeable;
import java.util.Collection; // for javadocs
-import java.util.Arrays;
import org.apache.lucene.util.IOUtils;
/** A Directory is a flat list of files. Files may be written once, when they
@@ -198,77 +197,83 @@
return this.toString();
}
-
/**
- * Copy all files of this directory to destination directory. All conflicting files at destination are overwritten
- * NOTE: this method only copies files that look like index files (ie, have extensions matching the known
- * extensions of index files).
- *
- * NOTE: the source directory should not change while this method is running. Otherwise the results are
- * undefined and you could easily hit a FileNotFoundException.
- *
- * @param to destination directory
+ * Copies the file <i>src</i> to {@link Directory} <i>to</i> under the new
+ * file name <i>dest</i>.
+ *
+ * If you want to copy the entire source directory to the destination one, you
+ * can do so like this:
+ *
+ * <pre>
+ * Directory to; // the directory to copy to
+ * for (String file : dir.listAll()) {
+ * dir.copy(to, file, newFile); // newFile can be either file, or a new name
+ * }
+ * </pre>
+ *
+ * <b>NOTE:</b> this method does not check whether <i>dest</i> exists and will
+ * overwrite it if it does.
*/
- public final void copyTo(Directory to) throws IOException {
- copyTo(to, Arrays.asList(listAll()));
- }
-
- /**
- *
- * Copy given files of this directory to destination directory. All conflicting files at destination are overwritten
- * NOTE: the source directory should not change while this method is running. Otherwise the results are
- * undefined and you could easily hit a FileNotFoundException.
- * NOTE: implementations can check if destination directory is of the same type as 'this' and perform optimized copy
- *
- * @param to destination directory
- * @param filenames file names to be copied
- */
- public void copyTo(Directory to, Collection<String> filenames) throws IOException {
- byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE];
- for (String filename : filenames) {
- IndexOutput os = null;
- IndexInput is = null;
- IOException priorException = null;
- try {
- // create file in dest directory
- os = to.createOutput(filename);
- // read current file
- is = openInput(filename);
- // and copy to dest directory
- long len = is.length();
- long readCount = 0;
- while (readCount < len) {
- int toRead = readCount + BufferedIndexOutput.BUFFER_SIZE > len ? (int) (len - readCount) : BufferedIndexOutput.BUFFER_SIZE;
- is.readBytes(buf, 0, toRead);
- os.writeBytes(buf, toRead);
- readCount += toRead;
- }
- } catch (IOException ioe) {
- priorException = ioe;
- } finally {
- IOUtils.closeSafely(priorException, os, is);
+ public void copy(Directory to, String src, String dest) throws IOException {
+ IndexOutput os = null;
+ IndexInput is = null;
+ IOException priorException = null;
+ int bufSize = BufferedIndexOutput.BUFFER_SIZE;
+ byte[] buf = new byte[bufSize];
+ try {
+ // create file in dest directory
+ os = to.createOutput(dest);
+ // read current file
+ is = openInput(src);
+ // and copy to dest directory
+ long len = is.length();
+ long numRead = 0;
+ while (numRead < len) {
+ long left = len - numRead;
+ int toRead = (int) (bufSize < left ? bufSize : left);
+ is.readBytes(buf, 0, toRead);
+ os.writeBytes(buf, toRead);
+ numRead += toRead;
}
+ } catch (IOException ioe) {
+ priorException = ioe;
+ } finally {
+ IOUtils.closeSafely(priorException, os, is);
}
}
/**
- * Copy contents of a directory src to a directory dest. If a file in src already exists in dest then the one in dest
- * will be blindly overwritten.
- *
- * NOTE: the source directory cannot change while this method is running. Otherwise the results are
- * undefined and you could easily hit a FileNotFoundException.
- *
- * NOTE: this method only copies files that look like index files (ie, have extensions matching the known
- * extensions of index files).
- *
- * @param src source directory
- * @param dest destination directory
- * @param closeDirSrc if true, call {@link #close()} method on source directory
- * @deprecated should be replaced with src.copyTo(dest); [src.close();]
+ * Copy contents of a directory src to a directory dest. If a file in src
+ * already exists in dest then the one in dest will be blindly overwritten.
+ *
+ * NOTE: the source directory cannot change while this method is
+ * running. Otherwise the results are undefined and you could easily hit a
+ * FileNotFoundException.
+ *
+ * NOTE: this method only copies files that look like index files (ie,
+ * have extensions matching the known extensions of index files).
+ *
+ * @param src source directory
+ * @param dest destination directory
+ * @param closeDirSrc if true, call {@link #close()} method on
+ * source directory
+ * @deprecated should be replaced with calls to
+ * {@link #copy(Directory, String, String)} for every file that
+ * needs copying. You can use the following code:
+ *
+ * <pre>
+ * for (String file : src.listAll()) {
+ * src.copy(dest, file, file);
+ * }
+ * </pre>
*/
- @Deprecated
public static void copy(Directory src, Directory dest, boolean closeDirSrc) throws IOException {
- src.copyTo(dest);
- if (closeDirSrc)
+ for (String file : src.listAll()) {
+ src.copy(dest, file, file);
+ }
+ if (closeDirSrc) {
src.close();
+ }
}
/**
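Usage sketch for the replacement API: the per-file copy(Directory, String, String) composes into a whole-directory copy, optionally renaming each file. The paths below are illustrative only:

    // Copy an index into RAM, file by file, with the new API:
    Directory src = FSDirectory.open(new File("/path/to/index"));
    Directory dest = new RAMDirectory();
    for (String file : src.listAll()) {
      src.copy(dest, file, file); // the third argument may also be a new name
    }
    src.close();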
Index: lucene/src/java/org/apache/lucene/store/FSDirectory.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/FSDirectory.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/store/FSDirectory.java (working copy)
@@ -431,29 +431,27 @@
}
@Override
- public void copyTo(Directory to, Collection<String> filenames) throws IOException {
+ public void copy(Directory to, String src, String dest) throws IOException {
if (to instanceof FSDirectory) {
FSDirectory target = (FSDirectory) to;
-
- for (String filename : filenames) {
- target.ensureCanWrite(filename);
- FileChannel input = null;
- FileChannel output = null;
- IOException priorException = null;
- try {
- input = new FileInputStream(new File(directory, filename)).getChannel();
- output = new FileOutputStream(new File(target.directory, filename)).getChannel();
- output.transferFrom(input, 0, input.size());
- } catch (IOException ioe) {
- priorException = ioe;
- } finally {
- IOUtils.closeSafely(priorException, input, output);
- }
+ target.ensureCanWrite(dest);
+ FileChannel input = null;
+ FileChannel output = null;
+ IOException priorException = null;
+ try {
+ input = new FileInputStream(new File(directory, src)).getChannel();
+ output = new FileOutputStream(new File(target.directory, dest)).getChannel();
+ output.transferFrom(input, 0, input.size());
+ } catch (IOException ioe) {
+ priorException = ioe;
+ } finally {
+ IOUtils.closeSafely(priorException, input, output);
}
- } else
- super.copyTo(to, filenames);
+ } else {
+ super.copy(to, src, dest);
+ }
}
-
+
protected static class FSIndexOutput extends BufferedIndexOutput {
private final FSDirectory parent;
private final String name;
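Design note on the override above: when both ends are FSDirectory, bytes move via FileChannel.transferFrom, skipping the byte[] staging buffer of the generic Directory.copy; any other destination falls back to super.copy. For example (paths illustrative):

    FSDirectory a = FSDirectory.open(new File("/index/a"));
    FSDirectory b = FSDirectory.open(new File("/index/b"));
    a.copy(b, "_0.cfs", "_0.cfs");                  // channel-to-channel transfer
    a.copy(new RAMDirectory(), "_0.cfs", "_0.cfs"); // buffered super.copy path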
Index: lucene/src/java/org/apache/lucene/store/RAMDirectory.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/RAMDirectory.java (revision 948393)
+++ lucene/src/java/org/apache/lucene/store/RAMDirectory.java (working copy)
@@ -23,6 +23,7 @@
import java.util.HashMap;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
+
import org.apache.lucene.util.ThreadInterruptedException;
/**
@@ -68,7 +69,12 @@
private RAMDirectory(Directory dir, boolean closeDir) throws IOException {
this();
- Directory.copy(dir, this, closeDir);
+ for (String file : dir.listAll()) {
+ dir.copy(this, file, file);
+ }
+ if (closeDir) {
+ dir.close();
+ }
}
@Override
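The private constructor above routes the RAMDirectory(Directory) convenience through the same per-file copy. A usage sketch (path illustrative):

    // Load an on-disk index fully into RAM:
    Directory fsDir = FSDirectory.open(new File("/path/to/index"));
    RAMDirectory ramDir = new RAMDirectory(fsDir); // copies every file via dir.copy
    fsDir.close(); // the RAMDirectory holds its own copies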
Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 948393)
+++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy)
@@ -19,6 +19,7 @@
import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -29,9 +30,10 @@
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.PhraseQuery;
-public class TestAddIndexesNoOptimize extends LuceneTestCase {
+public class TestAddIndexes extends LuceneTestCase {
public void testSimpleCase() throws IOException {
// main directory
Directory dir = new RAMDirectory();
@@ -67,7 +69,7 @@
// test doc count before segments are merged
writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
assertEquals(100, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
+ writer.addIndexes(new Directory[] { aux, aux2 });
assertEquals(190, writer.maxDoc());
writer.close();
_TestUtil.checkIndex(dir);
@@ -89,7 +91,7 @@
// test doc count before segments are merged/index is optimized
writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
assertEquals(190, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux3 });
+ writer.addIndexes(new Directory[] { aux3 });
assertEquals(230, writer.maxDoc());
writer.close();
@@ -120,7 +122,7 @@
writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
assertEquals(230, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux4 });
+ writer.addIndexes(new Directory[] { aux4 });
assertEquals(231, writer.maxDoc());
writer.close();
@@ -138,7 +140,7 @@
setUpDirs(dir, aux);
IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- writer.addIndexesNoOptimize(new Directory[] {aux});
+ writer.addIndexes(new Directory[] {aux});
// Adds 10 docs, then replaces them with another 10
// docs, so 10 pending deletes:
@@ -181,13 +183,12 @@
for (int i = 0; i < 20; i++) {
Document doc = new Document();
doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(new Field("content", "bbb " + i, Field.Store.NO,
- Field.Index.ANALYZED));
+ doc.add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
writer.updateDocument(new Term("id", "" + (i%10)), doc);
}
-
- writer.addIndexesNoOptimize(new Directory[] {aux});
-
+
+ writer.addIndexes(new Directory[] {aux});
+
// Deletes one of the 10 added docs, leaving 9:
PhraseQuery q = new PhraseQuery();
q.add(new Term("content", "bbb"));
@@ -231,7 +232,7 @@
q.add(new Term("content", "14"));
writer.deleteDocuments(q);
- writer.addIndexesNoOptimize(new Directory[] {aux});
+ writer.addIndexes(new Directory[] {aux});
writer.optimize();
writer.commit();
@@ -275,7 +276,7 @@
writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
try {
// cannot add self
- writer.addIndexesNoOptimize(new Directory[] { aux, dir });
+ writer.addIndexes(new Directory[] { aux, dir });
assertTrue(false);
}
catch (IllegalArgumentException e) {
@@ -288,7 +289,7 @@
}
// in all the remaining tests, make the doc count of the oldest segment
- // in dir large so that it is never merged in addIndexesNoOptimize()
+ // in dir large so that it is never merged in addIndexes()
// case 1: no tail segments
public void testNoTailSegments() throws IOException {
// main directory
@@ -304,9 +305,8 @@
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
addDocs(writer, 10);
- writer.addIndexesNoOptimize(new Directory[] { aux });
+ writer.addIndexes(new Directory[] { aux });
assertEquals(1040, writer.maxDoc());
- assertEquals(2, writer.getSegmentCount());
assertEquals(1000, writer.getDocCount(0));
writer.close();
@@ -327,9 +327,8 @@
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
addDocs(writer, 2);
- writer.addIndexesNoOptimize(new Directory[] { aux });
+ writer.addIndexes(new Directory[] { aux });
assertEquals(1032, writer.maxDoc());
- assertEquals(2, writer.getSegmentCount());
assertEquals(1000, writer.getDocCount(0));
writer.close();
@@ -351,7 +350,7 @@
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
+ writer.addIndexes(new Directory[] { aux, new RAMDirectory(aux) });
assertEquals(1060, writer.maxDoc());
assertEquals(1000, writer.getDocCount(0));
writer.close();
@@ -381,13 +380,10 @@
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(4));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
- assertEquals(1020, writer.maxDoc());
+ writer.addIndexes(new Directory[] { aux, new RAMDirectory(aux) });
+ assertEquals(1060, writer.maxDoc());
assertEquals(1000, writer.getDocCount(0));
writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1020);
}
// case 5: tail segments, invariants not hold
@@ -404,7 +400,7 @@
TEST_VERSION_CURRENT, new MockAnalyzer())
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
- writer.addIndexesNoOptimize(new Directory[] { aux });
+ writer.addIndexes(new Directory[] { aux });
assertEquals(30, writer.maxDoc());
assertEquals(3, writer.getSegmentCount());
writer.close();
@@ -427,13 +423,10 @@
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(6));
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
- assertEquals(1025, writer.maxDoc());
+ writer.addIndexes(new Directory[] { aux, aux2 });
+ assertEquals(1060, writer.maxDoc());
assertEquals(1000, writer.getDocCount(0));
writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1025);
}
private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
@@ -471,9 +464,9 @@
private void verifyTermDocs(Directory dir, Term term, int numDocs)
throws IOException {
IndexReader reader = IndexReader.open(dir, true);
- TermDocs termDocs = reader.termDocs(term);
+ DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, term.field, new BytesRef(term.text));
int count = 0;
- while (termDocs.next())
+ while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
count++;
assertEquals(numDocs, count);
reader.close();
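The hunk above ports TermDocs iteration to the flex API: DocsEnum is a DocIdSetIterator, so counting runs until nextDoc() returns NO_MORE_DOCS. General-purpose code should also guard against null, since getTermDocsEnum returns null when the term is absent; a standalone sketch:

    DocsEnum de = MultiFields.getTermDocsEnum(reader, null, "content", new BytesRef("aaa"));
    int count = 0;
    if (de != null) { // null when the term does not exist
      while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++;
      }
    }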
@@ -547,28 +540,10 @@
writer = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer())
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
- writer.addIndexesNoOptimize(new Directory[] {dir});
+ writer.addIndexes(new Directory[] {dir});
writer.close();
dir.close();
dir2.close();
}
- // LUCENE-1642: make sure CFS of destination indexwriter
- // is respected when copying tail segments
- public void testTargetCFS() throws IOException {
- Directory dir = new RAMDirectory();
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
- addDocs(writer, 1);
- writer.close();
-
- Directory other = new RAMDirectory();
- writer = newWriter(other, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(true);
- writer.addIndexesNoOptimize(new Directory[] {dir});
- assertTrue(writer.newestSegment().getUseCompoundFile());
- writer.close();
- }
}
Index: lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (revision 948393)
+++ lucene/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (working copy)
@@ -1,574 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util._TestUtil;
-
-import org.apache.lucene.search.PhraseQuery;
-
-public class TestAddIndexesNoOptimize extends LuceneTestCase {
- public void testSimpleCase() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // two auxiliary directories
- Directory aux = new RAMDirectory();
- Directory aux2 = new RAMDirectory();
-
- IndexWriter writer = null;
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer())
- .setOpenMode(OpenMode.CREATE));
- // add 100 documents
- addDocs(writer, 100);
- assertEquals(100, writer.maxDoc());
- writer.close();
- _TestUtil.checkIndex(dir);
-
- writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
- // add 40 documents in separate files
- addDocs(writer, 40);
- assertEquals(40, writer.maxDoc());
- writer.close();
-
- writer = newWriter(aux2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
- // add 40 documents in compound files
- addDocs2(writer, 50);
- assertEquals(50, writer.maxDoc());
- writer.close();
-
- // test doc count before segments are merged
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- assertEquals(100, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
- assertEquals(190, writer.maxDoc());
- writer.close();
- _TestUtil.checkIndex(dir);
-
- // make sure the old index is correct
- verifyNumDocs(aux, 40);
-
- // make sure the new index is correct
- verifyNumDocs(dir, 190);
-
- // now add another set in.
- Directory aux3 = new RAMDirectory();
- writer = newWriter(aux3, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- // add 40 documents
- addDocs(writer, 40);
- assertEquals(40, writer.maxDoc());
- writer.close();
-
- // test doc count before segments are merged/index is optimized
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- assertEquals(190, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux3 });
- assertEquals(230, writer.maxDoc());
- writer.close();
-
- // make sure the new index is correct
- verifyNumDocs(dir, 230);
-
- verifyTermDocs(dir, new Term("content", "aaa"), 180);
-
- verifyTermDocs(dir, new Term("content", "bbb"), 50);
-
- // now optimize it.
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- writer.optimize();
- writer.close();
-
- // make sure the new index is correct
- verifyNumDocs(dir, 230);
-
- verifyTermDocs(dir, new Term("content", "aaa"), 180);
-
- verifyTermDocs(dir, new Term("content", "bbb"), 50);
-
- // now add a single document
- Directory aux4 = new RAMDirectory();
- writer = newWriter(aux4, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- addDocs2(writer, 1);
- writer.close();
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- assertEquals(230, writer.maxDoc());
- writer.addIndexesNoOptimize(new Directory[] { aux4 });
- assertEquals(231, writer.maxDoc());
- writer.close();
-
- verifyNumDocs(dir, 231);
-
- verifyTermDocs(dir, new Term("content", "bbb"), 51);
- }
-
- public void testWithPendingDeletes() throws IOException {
- // main directory
- Directory dir = new MockRAMDirectory();
- // auxiliary directory
- Directory aux = new MockRAMDirectory();
-
- setUpDirs(dir, aux);
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
-
- writer.addIndexesNoOptimize(new Directory[] {aux});
-
- // Adds 10 docs, then replaces them with another 10
- // docs, so 10 pending deletes:
- for (int i = 0; i < 20; i++) {
- Document doc = new Document();
- doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(new Field("content", "bbb " + i, Field.Store.NO,
- Field.Index.ANALYZED));
- writer.updateDocument(new Term("id", "" + (i%10)), doc);
- }
- // Deletes one of the 10 added docs, leaving 9:
- PhraseQuery q = new PhraseQuery();
- q.add(new Term("content", "bbb"));
- q.add(new Term("content", "14"));
- writer.deleteDocuments(q);
-
- writer.optimize();
- writer.commit();
-
- verifyNumDocs(dir, 1039);
- verifyTermDocs(dir, new Term("content", "aaa"), 1030);
- verifyTermDocs(dir, new Term("content", "bbb"), 9);
-
- writer.close();
- dir.close();
- aux.close();
- }
-
- public void testWithPendingDeletes2() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
-
- // Adds 10 docs, then replaces them with another 10
- // docs, so 10 pending deletes:
- for (int i = 0; i < 20; i++) {
- Document doc = new Document();
- doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(new Field("content", "bbb " + i, Field.Store.NO,
- Field.Index.ANALYZED));
- writer.updateDocument(new Term("id", "" + (i%10)), doc);
- }
-
- writer.addIndexesNoOptimize(new Directory[] {aux});
-
- // Deletes one of the 10 added docs, leaving 9:
- PhraseQuery q = new PhraseQuery();
- q.add(new Term("content", "bbb"));
- q.add(new Term("content", "14"));
- writer.deleteDocuments(q);
-
- writer.optimize();
- writer.commit();
-
- verifyNumDocs(dir, 1039);
- verifyTermDocs(dir, new Term("content", "aaa"), 1030);
- verifyTermDocs(dir, new Term("content", "bbb"), 9);
-
- writer.close();
- dir.close();
- aux.close();
- }
-
- public void testWithPendingDeletes3() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
-
- // Adds 10 docs, then replaces them with another 10
- // docs, so 10 pending deletes:
- for (int i = 0; i < 20; i++) {
- Document doc = new Document();
- doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(new Field("content", "bbb " + i, Field.Store.NO,
- Field.Index.ANALYZED));
- writer.updateDocument(new Term("id", "" + (i%10)), doc);
- }
-
- // Deletes one of the 10 added docs, leaving 9:
- PhraseQuery q = new PhraseQuery();
- q.add(new Term("content", "bbb"));
- q.add(new Term("content", "14"));
- writer.deleteDocuments(q);
-
- writer.addIndexesNoOptimize(new Directory[] {aux});
-
- writer.optimize();
- writer.commit();
-
- verifyNumDocs(dir, 1039);
- verifyTermDocs(dir, new Term("content", "aaa"), 1030);
- verifyTermDocs(dir, new Term("content", "bbb"), 9);
-
- writer.close();
- dir.close();
- aux.close();
- }
-
- // case 0: add self or exceed maxMergeDocs, expect exception
- public void testAddSelf() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- IndexWriter writer = null;
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- // add 100 documents
- addDocs(writer, 100);
- assertEquals(100, writer.maxDoc());
- writer.close();
-
- writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
- // add 140 documents in separate files
- addDocs(writer, 40);
- writer.close();
- writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
- addDocs(writer, 100);
- writer.close();
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
- try {
- // cannot add self
- writer.addIndexesNoOptimize(new Directory[] { aux, dir });
- assertTrue(false);
- }
- catch (IllegalArgumentException e) {
- assertEquals(100, writer.maxDoc());
- }
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 100);
- }
-
- // in all the remaining tests, make the doc count of the oldest segment
- // in dir large so that it is never merged in addIndexesNoOptimize()
- // case 1: no tail segments
- public void testNoTailSegments() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
-
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- addDocs(writer, 10);
-
- writer.addIndexesNoOptimize(new Directory[] { aux });
- assertEquals(1040, writer.maxDoc());
- assertEquals(2, writer.getSegmentCount());
- assertEquals(1000, writer.getDocCount(0));
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1040);
- }
-
- // case 2: tail segments, invariants hold, no copy
- public void testNoCopySegments() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
-
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(9));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
- addDocs(writer, 2);
-
- writer.addIndexesNoOptimize(new Directory[] { aux });
- assertEquals(1032, writer.maxDoc());
- assertEquals(2, writer.getSegmentCount());
- assertEquals(1000, writer.getDocCount(0));
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1032);
- }
-
- // case 3: tail segments, invariants hold, copy, invariants hold
- public void testNoMergeAfterCopy() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
-
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
- writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
- assertEquals(1060, writer.maxDoc());
- assertEquals(1000, writer.getDocCount(0));
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1060);
- }
-
- // case 4: tail segments, invariants hold, copy, invariants not hold
- public void testMergeAfterCopy() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
-
- setUpDirs(dir, aux);
-
- IndexReader reader = IndexReader.open(aux, false);
- for (int i = 0; i < 20; i++) {
- reader.deleteDocument(i);
- }
- assertEquals(10, reader.numDocs());
- reader.close();
-
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(4));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
- writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
- assertEquals(1020, writer.maxDoc());
- assertEquals(1000, writer.getDocCount(0));
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1020);
- }
-
- // case 5: tail segments, invariants not hold
- public void testMoreMerges() throws IOException {
- // main directory
- Directory dir = new RAMDirectory();
- // auxiliary directory
- Directory aux = new RAMDirectory();
- Directory aux2 = new RAMDirectory();
-
- setUpDirs(dir, aux);
-
- IndexWriter writer = newWriter(aux2, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
- writer.addIndexesNoOptimize(new Directory[] { aux });
- assertEquals(30, writer.maxDoc());
- assertEquals(3, writer.getSegmentCount());
- writer.close();
-
- IndexReader reader = IndexReader.open(aux, false);
- for (int i = 0; i < 27; i++) {
- reader.deleteDocument(i);
- }
- assertEquals(3, reader.numDocs());
- reader.close();
-
- reader = IndexReader.open(aux2, false);
- for (int i = 0; i < 8; i++) {
- reader.deleteDocument(i);
- }
- assertEquals(22, reader.numDocs());
- reader.close();
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(6));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
- writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
- assertEquals(1025, writer.maxDoc());
- assertEquals(1000, writer.getDocCount(0));
- writer.close();
-
- // make sure the index is correct
- verifyNumDocs(dir, 1025);
- }
-
- private IndexWriter newWriter(Directory dir, IndexWriterConfig conf)
- throws IOException {
- conf.setMergePolicy(new LogDocMergePolicy());
- final IndexWriter writer = new IndexWriter(dir, conf);
- return writer;
- }
-
- private void addDocs(IndexWriter writer, int numDocs) throws IOException {
- for (int i = 0; i < numDocs; i++) {
- Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
- }
- }
-
- private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
- for (int i = 0; i < numDocs; i++) {
- Document doc = new Document();
- doc.add(new Field("content", "bbb", Field.Store.NO,
- Field.Index.ANALYZED));
- writer.addDocument(doc);
- }
- }
-
- private void verifyNumDocs(Directory dir, int numDocs) throws IOException {
- IndexReader reader = IndexReader.open(dir, true);
- assertEquals(numDocs, reader.maxDoc());
- assertEquals(numDocs, reader.numDocs());
- reader.close();
- }
-
- private void verifyTermDocs(Directory dir, Term term, int numDocs)
- throws IOException {
- IndexReader reader = IndexReader.open(dir, true);
- TermDocs termDocs = reader.termDocs(term);
- int count = 0;
- while (termDocs.next())
- count++;
- assertEquals(numDocs, count);
- reader.close();
- }
-
- private void setUpDirs(Directory dir, Directory aux) throws IOException {
- IndexWriter writer = null;
-
- writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
- // add 1000 documents in 1 segment
- addDocs(writer, 1000);
- assertEquals(1000, writer.maxDoc());
- assertEquals(1, writer.getSegmentCount());
- writer.close();
-
- writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
- // add 30 documents in 3 segments
- for (int i = 0; i < 3; i++) {
- addDocs(writer, 10);
- writer.close();
- writer = newWriter(aux, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(100));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
- }
- assertEquals(30, writer.maxDoc());
- assertEquals(3, writer.getSegmentCount());
- writer.close();
- }
-
- // LUCENE-1270
- public void testHangOnClose() throws IOException {
-
- Directory dir = new MockRAMDirectory();
- LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
- lmp.setUseCompoundFile(false);
- lmp.setUseCompoundDocStore(false);
- lmp.setMergeFactor(100);
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setMaxBufferedDocs(5).setMergePolicy(lmp));
-
- Document doc = new Document();
- doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
- Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- for(int i=0;i<60;i++)
- writer.addDocument(doc);
-
- Document doc2 = new Document();
- doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
- Field.Index.NO));
- doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
- Field.Index.NO));
- doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
- Field.Index.NO));
- doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
- Field.Index.NO));
- for(int i=0;i<10;i++)
- writer.addDocument(doc2);
- writer.close();
-
- Directory dir2 = new MockRAMDirectory();
- lmp = new LogByteSizeMergePolicy();
- lmp.setMinMergeMB(0.0001);
- lmp.setUseCompoundFile(false);
- lmp.setUseCompoundDocStore(false);
- lmp.setMergeFactor(4);
- writer = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer())
- .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
- writer.addIndexesNoOptimize(new Directory[] {dir});
- writer.close();
- dir.close();
- dir2.close();
- }
-
- // LUCENE-1642: make sure CFS of destination indexwriter
- // is respected when copying tail segments
- public void testTargetCFS() throws IOException {
- Directory dir = new RAMDirectory();
- IndexWriter writer = newWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
- addDocs(writer, 1);
- writer.close();
-
- Directory other = new RAMDirectory();
- writer = newWriter(other, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
- ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(true);
- writer.addIndexesNoOptimize(new Directory[] {dir});
- assertTrue(writer.newestSegment().getUseCompoundFile());
- writer.close();
- }
-}
Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 948393)
+++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -41,6 +41,7 @@
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
@@ -48,6 +49,7 @@
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -246,6 +248,46 @@
assertEquals("test for compressed field should have run 4 times", 4, hasTested29);
}
+ public void testAddOldIndexes() throws IOException {
+ for (String name : oldNames) {
+ unzip(getDataFile("index." + name + ".zip"), name);
+ String fullPath = fullDir(name);
+ Directory dir = FSDirectory.open(new File(fullPath));
+
+ Directory targetDir = new RAMDirectory();
+ IndexWriter w = new IndexWriter(targetDir, new IndexWriterConfig(
+ TEST_VERSION_CURRENT, new MockAnalyzer()));
+ w.addIndexes(new Directory[] { dir });
+ w.close();
+
+ _TestUtil.checkIndex(targetDir);
+
+ dir.close();
+ rmDir(name);
+ }
+ }
+
+ public void testAddOldIndexesReader() throws IOException {
+ for (String name : oldNames) {
+ unzip(getDataFile("index." + name + ".zip"), name);
+ String fullPath = fullDir(name);
+ Directory dir = FSDirectory.open(new File(fullPath));
+ IndexReader reader = IndexReader.open(dir);
+
+ Directory targetDir = new RAMDirectory();
+ IndexWriter w = new IndexWriter(targetDir, new IndexWriterConfig(
+ TEST_VERSION_CURRENT, new MockAnalyzer()));
+ w.addIndexes(new IndexReader[] { reader });
+ w.close();
+ reader.close();
+
+ _TestUtil.checkIndex(targetDir);
+
+ dir.close();
+ rmDir(name);
+ }
+ }
+
public void testSearchOldIndex() throws IOException {
for(int i=0;i<oldNames.length;i++) {
Index: solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java
===================================================================
--- solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java (revision 948393)
+++ solr/src/java/org/apache/solr/update/DirectUpdateHandler2.java (working copy)
if (dirs != null && dirs.length > 0) {
openWriter();
- writer.addIndexesNoOptimize(dirs);
+ writer.addIndexes(dirs);
rc = 1;
} else {
rc = 0;