- ? collator.compare(term.toString(), lowerTermText) >= 0
- : collator.compare(term.toString(), lowerTermText) > 0)
+ ? collator.compare(term.utf8ToString(), lowerTermText) >= 0
+ : collator.compare(term.utf8ToString(), lowerTermText) > 0)
&& (upperTermText == null
|| (includeUpper
- ? collator.compare(term.toString(), upperTermText) <= 0
- : collator.compare(term.toString(), upperTermText) < 0))) {
+ ? collator.compare(term.utf8ToString(), upperTermText) <= 0
+ : collator.compare(term.utf8ToString(), upperTermText) < 0))) {
return AcceptStatus.YES;
}
return AcceptStatus.NO;
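
For context: on the flex branch terms arrive as raw UTF-8 BytesRef values, so collator-based range checks must decode explicitly with utf8ToString() before comparing. A minimal standalone sketch of the accept() logic above; the bounds, flags, and term text are hypothetical:

    import java.text.Collator;
    import java.util.Locale;

    // Sketch only: mirrors the accept() logic in the hunk above.
    public class CollatedRangeSketch {
      public static void main(String[] args) {
        Collator collator = Collator.getInstance(Locale.ENGLISH);
        String lowerTermText = "apple", upperTermText = "pear"; // hypothetical bounds
        boolean includeLower = true, includeUpper = false;
        String termText = "melon"; // stands in for term.utf8ToString()
        boolean accept =
            (lowerTermText == null
                || (includeLower
                    ? collator.compare(termText, lowerTermText) >= 0
                    : collator.compare(termText, lowerTermText) > 0))
            && (upperTermText == null
                || (includeUpper
                    ? collator.compare(termText, upperTermText) <= 0
                    : collator.compare(termText, upperTermText) < 0));
        System.out.println(accept ? "AcceptStatus.YES" : "AcceptStatus.NO");
      }
    }
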
Index: src/java/org/apache/lucene/search/PhraseQuery.java
===================================================================
--- src/java/org/apache/lucene/search/PhraseQuery.java (revision 915795)
+++ src/java/org/apache/lucene/search/PhraseQuery.java (working copy)
@@ -170,7 +170,7 @@
t.field(),
text) != null) {
// term does exist, but has no positions
- throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + text + ")");
+ throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")");
} else {
// term does not exist
return null;
Index: src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- src/java/org/apache/lucene/search/TermScorer.java (revision 915795)
+++ src/java/org/apache/lucene/search/TermScorer.java (working copy)
@@ -24,6 +24,7 @@
/** Expert: A Scorer for documents matching a Term.
*/
final class TermScorer extends Scorer {
+ private Weight weight;
private DocsEnum docsEnum;
private byte[] norms;
private float weightValue;
@@ -53,6 +54,7 @@
TermScorer(Weight weight, DocsEnum td, Similarity similarity, byte[] norms) {
super(similarity);
+ this.weight = weight;
this.docsEnum = td;
this.norms = norms;
this.weightValue = weight.getValue();
@@ -157,7 +159,6 @@
}
/** Returns a string representation of this TermScorer. */
- // nocommit
- //@Override
- //public String toString() { return "scorer(" + weight + ")"; }
+ @Override
+ public String toString() { return "scorer(" + weight + ")"; }
}
Index: src/java/org/apache/lucene/search/FieldCacheImpl.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 915795)
+++ src/java/org/apache/lucene/search/FieldCacheImpl.java (working copy)
@@ -667,7 +667,7 @@
break;
}
docs = termsEnum.docs(delDocs, docs);
- final String termval = term.toString();
+ final String termval = term.utf8ToString();
while (true) {
final int docID = docs.nextDoc();
if (docID == DocsEnum.NO_MORE_DOCS) {
@@ -721,7 +721,7 @@
}
// store term text
- mterms[t] = term.toString();
+ mterms[t] = term.utf8ToString();
//System.out.println("FC: ord=" + t + " term=" + term.toBytesString());
docs = termsEnum.docs(delDocs, docs);
@@ -783,7 +783,7 @@
if (b >= '0' && b <= '9') {
number = 10*number + (int) (b-'0');
} else {
- throw new NumberFormatException("could not parse \"" + term + "\" to a number");
+ throw new NumberFormatException("could not parse \"" + term.utf8ToString() + "\" to a number");
}
}
return negMul * number;
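
The parser in the last hunk walks the term's UTF-8 bytes directly and only pays for the utf8ToString() decode on the error path. A standalone sketch of that digit loop (the method name is hypothetical; bytes/offset/length mirror BytesRef's fields, and negMul is the sign multiplier from the surrounding parser state):

    // Sketch only: ASCII digits are the only legal input; anything
    // else raises NumberFormatException with the decoded term text.
    static int parseBytesToInt(byte[] bytes, int offset, int length, int negMul) {
      int number = 0;
      for (int i = offset; i < offset + length; i++) {
        final byte b = bytes[i];
        if (b >= '0' && b <= '9') {
          number = 10 * number + (int) (b - '0');
        } else {
          throw new NumberFormatException("could not parse \""
              + new String(bytes, offset, length, java.nio.charset.StandardCharsets.UTF_8)
              + "\" to a number");
        }
      }
      return negMul * number;
    }
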
Index: src/java/org/apache/lucene/index/LegacyFieldsEnum.java
===================================================================
--- src/java/org/apache/lucene/index/LegacyFieldsEnum.java (revision 915795)
+++ src/java/org/apache/lucene/index/LegacyFieldsEnum.java (working copy)
@@ -92,7 +92,7 @@
if (terms != null) {
terms.close();
}
- terms = r.terms(new Term(field, text.toString()));
+ terms = r.terms(new Term(field, text.utf8ToString()));
final Term t = terms.term();
if (t == null) {
Index: src/java/org/apache/lucene/index/IndexFileNames.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileNames.java (revision 915795)
+++ src/java/org/apache/lucene/index/IndexFileNames.java (working copy)
@@ -17,10 +17,19 @@
* limitations under the License.
*/
+import org.apache.lucene.index.codecs.Codec;
+
/**
- * Useful constants representing filenames and extensions used by lucene
+ * Useful constants representing filenames and extensions
+ * used by Lucene.
+ *
+ * NOTE: extensions used by codecs are not
+ * listed here. You must interact with the {@link Codec}
+ * directly.
+ *
+ * @lucene.internal
*/
-// nocommit -- made public
+
public final class IndexFileNames {
/** Name of the index segment file */
@@ -36,18 +45,6 @@
/** Extension of norms file */
static final String NORMS_EXTENSION = "nrm";
- /** Extension of freq postings file */
- //static final String FREQ_EXTENSION = "frq";
-
- /** Extension of prox postings file */
- //static final String PROX_EXTENSION = "prx";
-
- /** Extension of terms file */
- //static final String TERMS_EXTENSION = "tis";
-
- /** Extension of terms index file */
- //static final String TERMS_INDEX_EXTENSION = "tii";
-
/** Extension of stored fields index file */
static final String FIELDS_INDEX_EXTENSION = "fdx";
@@ -64,7 +61,6 @@
static final String VECTORS_INDEX_EXTENSION = "tvx";
/** Extension of compound file */
- // nocommit made public
public static final String COMPOUND_FILE_EXTENSION = "cfs";
/** Extension of compound file for doc store files*/
@@ -85,6 +81,9 @@
/** Extension of gen file */
static final String GEN_EXTENSION = "gen";
+ // nocommit -- more cleanup needed -- do we really use all
+ // these arrays below?
+
/**
* This array contains all filename extensions used by
* Lucene's index files, with two exceptions, namely the
@@ -98,10 +97,6 @@
FIELD_INFOS_EXTENSION,
FIELDS_INDEX_EXTENSION,
FIELDS_EXTENSION,
- //TERMS_INDEX_EXTENSION,
- //TERMS_EXTENSION,
- //FREQ_EXTENSION,
- //PROX_EXTENSION,
DELETES_EXTENSION,
VECTORS_INDEX_EXTENSION,
VECTORS_DOCUMENTS_EXTENSION,
@@ -109,11 +104,6 @@
GEN_EXTENSION,
NORMS_EXTENSION,
COMPOUND_FILE_STORE_EXTENSION,
- // nocommit -- need cleaner way!
- "doc",
- "pos",
- "pyl",
- "skp"
};
/** File extensions that are added to a compound file
@@ -122,10 +112,6 @@
FIELD_INFOS_EXTENSION,
FIELDS_INDEX_EXTENSION,
FIELDS_EXTENSION,
- //TERMS_INDEX_EXTENSION,
- //TERMS_EXTENSION,
- //FREQ_EXTENSION,
- //PROX_EXTENSION,
VECTORS_INDEX_EXTENSION,
VECTORS_DOCUMENTS_EXTENSION,
VECTORS_FIELDS_EXTENSION,
@@ -142,22 +128,14 @@
static final String[] NON_STORE_INDEX_EXTENSIONS = new String[] {
FIELD_INFOS_EXTENSION,
- //FREQ_EXTENSION,
- //PROX_EXTENSION,
- //TERMS_EXTENSION,
- //TERMS_INDEX_EXTENSION,
NORMS_EXTENSION
};
/** File extensions of old-style index files */
static final String COMPOUND_EXTENSIONS[] = new String[] {
FIELD_INFOS_EXTENSION,
- //FREQ_EXTENSION,
- //PROX_EXTENSION,
FIELDS_INDEX_EXTENSION,
FIELDS_EXTENSION,
- //TERMS_INDEX_EXTENSION,
- //TERMS_EXTENSION
};
static final String COMPOUND_EXTENSIONS_NOT_CODEC[] = new String[] {
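
With the postings extensions deleted here, the authoritative list of file suffixes is split between this class and the registered codecs, as the new javadoc NOTE says. A hedged sketch of assembling the full set at runtime; it assumes the Codecs.getAllExtensions() call used elsewhere in this patch, and package access to the package-private INDEX_EXTENSIONS array:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.index.codecs.Codecs;

    // Sketch only: core extensions plus whatever each codec registers.
    Set<String> all = new HashSet<String>();
    for (String ext : IndexFileNames.INDEX_EXTENSIONS) {
      all.add(ext);
    }
    all.addAll(Codecs.getDefault().getAllExtensions());
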
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java (revision 915795)
+++ src/java/org/apache/lucene/index/SegmentInfo.java (working copy)
@@ -400,14 +400,12 @@
if (result == null)
throw new IOException("cannot read directory " + dir + ": listAll() returned null");
- final IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
- String pattern;
- pattern = name + ".s";
- int patternLength = pattern.length();
+ final String pattern = name + ".s\\d+";
for(int i = 0; i < result.length; i++){
String fileName = result[i];
- if (filter.accept(null, fileName) && fileName.startsWith(pattern) && Character.isDigit(fileName.charAt(patternLength)))
- return true;
+ if (fileName.matches(pattern)) {
+ return true;
+ }
}
return false;
}
@@ -701,16 +699,17 @@
// Pre-2.1: we have to scan the dir to find all
// matching _X.sN/_X.fN files for our segment:
String prefix;
- if (useCompoundFile)
+ if (useCompoundFile) {
prefix = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION;
- else
+ } else {
prefix = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION;
- int prefixLength = prefix.length();
+ }
+ final String pattern = prefix + "\\d+";
+
String[] allFiles = dir.listAll();
- final IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
for(int i=0;i<allFiles.length;i++) {
String fileName = allFiles[i];
- if (filter.accept(null, fileName) && fileName.length() > prefixLength && Character.isDigit(fileName.charAt(prefixLength)) && fileName.startsWith(prefix)) {
+ if (fileName.matches(pattern)) {
files.add(fileName);
}
}
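
Both hunks trade the filter/prefix/charAt dance for a single regex: a matching file is the segment name plus the norms prefix followed by one or more digits. A quick sanity check of the pattern's behavior (segment name "_3" is hypothetical; note the unescaped '.' matches any character, which is harmless for names coming out of dir.listAll()):

    String pattern = "_3" + ".s\\d+";
    System.out.println("_3.s0".matches(pattern));   // true
    System.out.println("_3.s12".matches(pattern));  // true
    System.out.println("_3.s".matches(pattern));    // false -- no digits
    System.out.println("_3.fdx".matches(pattern));  // false -- wrong extension
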
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java (revision 915795)
+++ src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -1453,7 +1453,7 @@
@Override
public Term term() {
if (!done && terms != null && currentTerm != null) {
- return new Term(currentField, currentTerm.toString());
+ return new Term(currentField, currentTerm.utf8ToString());
}
return null;
}
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java (revision 915795)
+++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy)
@@ -76,26 +76,10 @@
private final Codecs codecs;
private Codec codec;
+ private SegmentWriteState segmentWriteState;
- /** This ctor used only by test code.
- *
- * @param dir The Directory to merge the other segments into
- * @param name The name of the new segment
- */
- SegmentMerger(Directory dir, String name) {
+ SegmentMerger(Directory dir, int termIndexInterval, String name, MergePolicy.OneMerge merge, Codecs codecs) {
directory = dir;
- segment = name;
- codecs = Codecs.getDefault();
- checkAbort = new CheckAbort(null, null) {
- @Override
- public void work(double units) throws MergeAbortedException {
- // do nothing
- }
- };
- }
-
- SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge, Codecs codecs) {
- directory = writer.getDirectory();
this.codecs = codecs;
segment = name;
if (merge != null) {
@@ -108,7 +92,7 @@
}
};
}
- termIndexInterval = writer.getTermIndexInterval();
+ this.termIndexInterval = termIndexInterval;
}
boolean hasProx() {
@@ -183,13 +167,6 @@
}
}
- final List<String> createCompoundFile(String fileName) throws IOException {
- // nocommit -- messy!
- final SegmentWriteState state = new SegmentWriteState(null, directory, segment, fieldInfos, null, mergedDocs, 0, 0, Codecs.getDefault());
- return createCompoundFile(fileName, new SegmentInfo(segment, mergedDocs, directory,
- Codecs.getDefault().getWriter(state)));
- }
-
final List<String> createCompoundFile(String fileName, final SegmentInfo info)
throws IOException {
CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
@@ -375,14 +352,17 @@
// details.
throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption");
- } else
+ } else {
// If we are skipping the doc stores, that means there
// are no deletions in any of these segments, so we
// just sum numDocs() of each segment to get total docCount
for (final IndexReader reader : readers) {
docCount += reader.numDocs();
}
+ }
+ segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, null, docCount, 0, termIndexInterval, codecs);
+
return docCount;
}
@@ -582,11 +562,9 @@
private final void mergeTerms() throws CorruptIndexException, IOException {
- SegmentWriteState state = new SegmentWriteState(null, directory, segment, fieldInfos, null, mergedDocs, 0, termIndexInterval, codecs);
-
// Let Codecs decide which codec will be used to write
// the new segment:
- codec = codecs.getWriter(state);
+ codec = codecs.getWriter(segmentWriteState);
int docBase = 0;
@@ -647,7 +625,7 @@
}
starts[mergeState.readerCount] = inputDocBase;
- final FieldsConsumer consumer = codec.fieldsConsumer(state);
+ final FieldsConsumer consumer = codec.fieldsConsumer(segmentWriteState);
mergeState.multiDeletedDocs = new MultiBits(subBits, starts);
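
Net effect of this file's changes: SegmentMerger no longer reaches back into IndexWriter, and a single SegmentWriteState (built in mergeFields(), once docCount is known) is reused by mergeTerms(). A sketch of driving the new constructor directly, following the updated tests at the end of this patch; dir, the readers, and the segment name are assumptions:

    // Sketch only: mirrors TestSegmentMerger/TestDoc below. Passing a
    // null OneMerge means this is not an IndexWriter-driven merge.
    SegmentMerger merger = new SegmentMerger(dir,
        IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, "_merged", null,
        Codecs.getDefault());
    merger.add(reader1);
    merger.add(reader2);
    int docsMerged = merger.merge(); // also builds the shared SegmentWriteState
    merger.closeReaders();
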
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 915795)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -3226,7 +3226,7 @@
try {
mergedName = newSegmentName();
- merger = new SegmentMerger(this, mergedName, null, codecs);
+ merger = new SegmentMerger(directory, termIndexInterval, mergedName, null, codecs);
SegmentReader sReader = null;
synchronized(this) {
@@ -4241,7 +4241,7 @@
if (infoStream != null)
message("merging " + merge.segString(directory));
- merger = new SegmentMerger(this, mergedName, merge, codecs);
+ merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge, codecs);
merge.readers = new SegmentReader[numSegments];
merge.readersClone = new SegmentReader[numSegments];
Index: src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 915795)
+++ src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy)
@@ -143,27 +143,7 @@
// First pass: walk the files and initialize our ref
// counts:
long currentGen = segmentInfos.getGeneration();
- final Collection<String> codecsExtensions = codecs.getAllExtensions();
- final FilenameFilter mainFilter = IndexFileNameFilter.getFilter();
-
- indexFilenameFilter = new FilenameFilter() {
- public boolean accept(File dir, String name) {
- if (mainFilter.accept(dir, name)) {
- return true;
- } else {
- // See if any of the codecs claim this
- // extension:
- int i = name.lastIndexOf('.');
- if (i != -1) {
- String extension = name.substring(1+i);
- if (codecsExtensions.contains(extension)) {
- return true;
- }
- }
- return false;
- }
- }
- };
+ indexFilenameFilter = new IndexFileNameFilter(codecs);
String[] files = directory.listAll();
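
With the codec-extension logic folded into IndexFileNameFilter itself (next file), callers construct the filter with the Codecs in play instead of composing an anonymous FilenameFilter. A minimal usage sketch; the index path is hypothetical:

    import java.io.File;
    import java.io.FilenameFilter;
    import org.apache.lucene.index.IndexFileNameFilter;
    import org.apache.lucene.index.codecs.Codecs;

    // Sketch only: list files Lucene or a registered codec could have written.
    FilenameFilter filter = new IndexFileNameFilter(Codecs.getDefault());
    for (String fileName : new File("/path/to/index").list(filter)) {
      System.out.println(fileName);
    }
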
Index: src/java/org/apache/lucene/index/IndexFileNameFilter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexFileNameFilter.java (revision 915735)
+++ src/java/org/apache/lucene/index/IndexFileNameFilter.java (working copy)
@@ -20,28 +20,28 @@
import java.io.File;
import java.io.FilenameFilter;
import java.util.HashSet;
+import org.apache.lucene.index.codecs.Codecs;
/**
- * Filename filter that accept filenames and extensions only created by Lucene.
+ * Filename filter that accepts only filenames and extensions
+ * created by Lucene.
+ *
+ * @lucene.internal
*/
-// nocommit -- make ctor that takes Codecs and expands base
-// filter to include any of their extensions?
public class IndexFileNameFilter implements FilenameFilter {
- private static IndexFileNameFilter singleton = new IndexFileNameFilter();
- private HashSet<String> extensions;
- private HashSet<String> extensionsInCFS;
+ private final HashSet<String> extensions;
- // Prevent instantiation.
- private IndexFileNameFilter() {
+ public IndexFileNameFilter(Codecs codecs) {
extensions = new HashSet<String>();
for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.length; i++) {
extensions.add(IndexFileNames.INDEX_EXTENSIONS[i]);
}
- extensionsInCFS = new HashSet<String>();
- for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.length; i++) {
- extensionsInCFS.add(IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i]);
+ if (codecs != null) {
+ for(String ext : codecs.getAllExtensions()) {
+ extensions.add(ext);
+ }
}
}
@@ -67,29 +67,4 @@
}
return false;
}
-
- /**
- * Returns true if this is a file that would be contained
- * in a CFS file. This function should only be called on
- * files that pass the above "accept" (ie, are already
- * known to be a Lucene index file).
- */
- public boolean isCFSFile(String name) {
- int i = name.lastIndexOf('.');
- if (i != -1) {
- String extension = name.substring(1+i);
- if (extensionsInCFS.contains(extension)) {
- return true;
- }
- if (extension.startsWith("f") &&
- extension.matches("f\\d+")) {
- return true;
- }
- }
- return false;
- }
-
- public static IndexFileNameFilter getFilter() {
- return singleton;
- }
}
Property changes on: src/java/org/apache/lucene/index/IndexFileNameFilter.java
___________________________________________________________________
Added: svn:eol-style
+ native
Index: src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
===================================================================
--- src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (revision 915795)
+++ src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (working copy)
@@ -277,7 +277,7 @@
System.out.println("pff.seek term=" + term);
}
skipNext = false;
- termEnum = getTermsDict().terms(new Term(fieldInfo.name, term.toString()));
+ termEnum = getTermsDict().terms(new Term(fieldInfo.name, term.utf8ToString()));
final Term t = termEnum.term();
final BytesRef tr;
Index: src/java/org/apache/lucene/store/Directory.java
===================================================================
--- src/java/org/apache/lucene/store/Directory.java (revision 915795)
+++ src/java/org/apache/lucene/store/Directory.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import java.io.Closeable;
-import org.apache.lucene.index.IndexFileNameFilter;
-
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
@@ -180,16 +178,9 @@
public static void copy(Directory src, Directory dest, boolean closeDirSrc) throws IOException {
final String[] files = src.listAll();
- IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
-
byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE];
for (int i = 0; i < files.length; i++) {
- if (false && !filter.accept(null, files[i])) {
- System.out.println(" filter rejects " + files[i]);
- continue;
- }
-
IndexOutput os = null;
IndexInput is = null;
try {
Index: src/java/org/apache/lucene/util/BytesRef.java
===================================================================
--- src/java/org/apache/lucene/util/BytesRef.java (revision 915795)
+++ src/java/org/apache/lucene/util/BytesRef.java (working copy)
@@ -136,11 +136,13 @@
return this.bytesEquals((BytesRef) other);
}
+ // nocommit -- catch statically all places where this is
+ // being incorrectly called & switch to utf8ToString
public String toString() {
// nocommit -- do this, to fix all places using
// toString, to use utf8ToString instead:
- //throw new RuntimeException();
- return utf8ToString();
+ throw new RuntimeException();
+ //return utf8ToString();
}
/** Interprets stored bytes as UTF8 bytes, returning the
@@ -149,7 +151,8 @@
try {
return new String(bytes, offset, length, "UTF-8");
} catch (UnsupportedEncodingException uee) {
- // should not happen
+ // should not happen -- UTF8 is presumably supported
+ // by all JREs
throw new RuntimeException(uee);
}
}
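
With toString() now throwing, decoding is always explicit: utf8ToString() converts the bytes, and any leftover implicit toString() call site fails fast during tests. A sketch of the intended usage (the String constructor on BytesRef is an assumption):

    import org.apache.lucene.util.BytesRef;

    BytesRef term = new BytesRef("melon");  // assumed String ctor
    String text = term.utf8ToString();      // correct: decode the UTF-8 bytes
    // term.toString() would now throw RuntimeException until all
    // call sites are fixed (see the nocommit above).
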
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndTruncQuery.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndTruncQuery.java (revision 915795)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndTruncQuery.java (working copy)
@@ -108,7 +108,7 @@
while(text != null) {
if (text != null && text.startsWith(prefixRef)) {
- String textString = text.toString();
+ String textString = text.utf8ToString();
matcher.reset(textString.substring(prefixLength));
if (matcher.matches()) {
mtv.visitMatchingTerm(new Term(fieldName, textString));
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndPrefixQuery.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndPrefixQuery.java (revision 915795)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SrndPrefixQuery.java (working copy)
@@ -70,7 +70,7 @@
expanded = true;
} else if (status == TermsEnum.SeekStatus.NOT_FOUND) {
if (termsEnum.term().startsWith(prefixRef)) {
- mtv.visitMatchingTerm(new Term(fieldName, termsEnum.term().toString()));
+ mtv.visitMatchingTerm(new Term(fieldName, termsEnum.term().utf8ToString()));
expanded = true;
} else {
skip = true;
@@ -84,7 +84,7 @@
while(true) {
BytesRef text = termsEnum.next();
if (text != null && text.startsWith(prefixRef)) {
- mtv.visitMatchingTerm(new Term(fieldName, text.toString()));
+ mtv.visitMatchingTerm(new Term(fieldName, text.utf8ToString()));
expanded = true;
} else {
break;
Index: contrib/spellchecker/src/java/org/apache/lucene/search/spell/LuceneDictionary.java
===================================================================
--- contrib/spellchecker/src/java/org/apache/lucene/search/spell/LuceneDictionary.java (revision 915795)
+++ contrib/spellchecker/src/java/org/apache/lucene/search/spell/LuceneDictionary.java (working copy)
@@ -74,7 +74,7 @@
return null;
}
- String result = pendingTerm.toString();
+ String result = pendingTerm.utf8ToString();
try {
pendingTerm = termsEnum.next();
Index: contrib/regex/src/java/org/apache/lucene/search/regex/RegexTermsEnum.java
===================================================================
--- contrib/regex/src/java/org/apache/lucene/search/regex/RegexTermsEnum.java (revision 915795)
+++ contrib/regex/src/java/org/apache/lucene/search/regex/RegexTermsEnum.java (working copy)
@@ -56,7 +56,7 @@
@Override
protected final AcceptStatus accept(BytesRef term) {
if (term.startsWith(prefixRef)) {
- return regexImpl.match(term.toString()) ? AcceptStatus.YES : AcceptStatus.NO;
+ return regexImpl.match(term.utf8ToString()) ? AcceptStatus.YES : AcceptStatus.NO;
} else {
return AcceptStatus.END;
}
Index: contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
===================================================================
--- contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (revision 915795)
+++ contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (working copy)
@@ -213,7 +213,7 @@
totalVariantDocFreqs+=fe.docFreq();
float score=boostAtt.getBoost();
if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore){
- ScoreTerm st=new ScoreTerm(new Term(startTerm.field(), possibleMatch.toString()),score,startTerm);
+ ScoreTerm st=new ScoreTerm(new Term(startTerm.field(), possibleMatch.utf8ToString()),score,startTerm);
variantsQ.insertWithOverflow(st);
minScore = variantsQ.top().score; // maintain minScore
}
Index: backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestDoc.java (revision 915514)
+++ backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestDoc.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.codecs.Codecs;
/** JUnit adaptation of an older test case DocTest. */
@@ -180,20 +181,24 @@
SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
- SegmentMerger merger = new SegmentMerger(si1.dir, merged);
+ SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, merged, null, Codecs.getDefault());
merger.add(r1);
merger.add(r2);
merger.merge();
merger.closeReaders();
+ final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
+ useCompoundFile, true, -1, null, false, merger.hasProx(),
+ merger.getCodec());
+
if (useCompoundFile) {
- List filesToDelete = merger.createCompoundFile(merged + ".cfs");
+ List filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
si1.dir.deleteFile((String) iter.next());
}
- return new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, useCompoundFile, true);
+ return info;
}
Index: backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 915514)
+++ backwards/flex_1458_3_0_back_compat_tests/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy)
@@ -18,9 +18,11 @@
*/
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
+import org.apache.lucene.index.codecs.Codecs;
import java.io.IOException;
import java.util.Collection;
@@ -63,14 +65,16 @@
}
public void testMerge() throws IOException {
- SegmentMerger merger = new SegmentMerger(mergedDir, mergedSegment);
+ SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, null, Codecs.getDefault());
merger.add(reader1);
merger.add(reader2);
int docsMerged = merger.merge();
merger.closeReaders();
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
- SegmentReader mergedReader = SegmentReader.get(true, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true), IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+ SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true,
+ -1, null, false, merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null);
+
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
Document newDoc1 = mergedReader.document(0);