emptyMap();
- }
+ final long checksumNow = input.getChecksum();
+ final long checksumThen = input.readLong();
+ if (checksumNow != checksumThen)
+ throw new CorruptIndexException("checksum mismatch in segments file");
- if (format <= FORMAT_CHECKSUM) {
- final long checksumNow = input.getChecksum();
- final long checksumThen = input.readLong();
- if (checksumNow != checksumThen)
- throw new CorruptIndexException("checksum mismatch in segments file");
- }
success = true;
}
finally {
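[Annotation] This first hunk makes the trailing checksum mandatory: a segments_N file now always ends with the CRC that ChecksumIndexOutput recorded, and readers verify it unconditionally. A minimal read-side sketch of that convention (the directory handle, file name, and wrapper method are assumed for illustration):

    import java.io.IOException;
    import org.apache.lucene.index.CorruptIndexException;
    import org.apache.lucene.store.ChecksumIndexInput;
    import org.apache.lucene.store.Directory;

    // Sketch: a ChecksumIndexInput keeps a running CRC over every byte read;
    // the final 8 bytes of the file hold the CRC the writer recorded.
    void verifyTrailingChecksum(Directory dir, String segmentsFileName) throws IOException {
      final ChecksumIndexInput input = new ChecksumIndexInput(dir.openInput(segmentsFileName));
      try {
        // ... read format, version, counter, SegmentInfo records, user data ...
        final long checksumNow = input.getChecksum(); // CRC of everything read so far
        final long checksumThen = input.readLong();   // CRC stored by the writer
        if (checksumNow != checksumThen)
          throw new CorruptIndexException("checksum mismatch in segments file");
      } finally {
        input.close();
      }
    }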
@@ -327,7 +276,7 @@
// before finishCommit is called
ChecksumIndexOutput pendingSegnOutput;
- private final void write(Directory directory) throws IOException {
+ private void write(Directory directory) throws IOException {
String segmentFileName = getNextSegmentFileName();
@@ -348,8 +297,8 @@
// the index
segnOutput.writeInt(counter); // write counter
segnOutput.writeInt(size()); // write infos
- for (int i = 0; i < size(); i++) {
- info(i).write(segnOutput);
+ for (SegmentInfo si : this) {
+ si.write(segnOutput);
}
segnOutput.writeStringStringMap(userData);
segnOutput.prepareCommit();
@@ -612,7 +561,7 @@
if (genInput != null) {
try {
int version = genInput.readInt();
- if (version == FORMAT_LOCKLESS) {
+ if (version == FORMAT_SEGMENTS_GEN_CURRENT) {
long gen0 = genInput.readLong();
long gen1 = genInput.readLong();
if (infoStream != null) {
@@ -642,10 +591,7 @@
}
// Pick the larger of the two gen's:
- if (genA > genB)
- gen = genA;
- else
- gen = genB;
+ gen = Math.max(genA, genB);
if (gen == -1) {
// Neither approach found a generation
@@ -858,9 +804,7 @@
// logic in SegmentInfos to kick in and load the last
// good (previous) segments_N-1 file.
- final String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
- "",
- generation);
+ final String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
success = false;
try {
dir.sync(Collections.singleton(fileName));
@@ -880,7 +824,7 @@
try {
IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
try {
- genOutput.writeInt(FORMAT_LOCKLESS);
+ genOutput.writeInt(FORMAT_SEGMENTS_GEN_CURRENT);
genOutput.writeLong(generation);
genOutput.writeLong(generation);
} finally {
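[Annotation] The two segments.gen hunks pair up: the writer stamps FORMAT_SEGMENTS_GEN_CURRENT and then writes the generation twice, so a reader can detect a torn write by comparing the copies. A sketch of the matching read side, assuming the reader trusts the value only when both copies agree (which is what writing it twice is for):

    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IndexInput;

    // Sketch: segments.gen = version int + generation written twice.
    // The two copies must agree or the file is ignored (-1 = no answer).
    long readSegmentsGen(Directory dir) throws IOException {
      final IndexInput genInput = dir.openInput(IndexFileNames.SEGMENTS_GEN);
      try {
        if (genInput.readInt() == FORMAT_SEGMENTS_GEN_CURRENT) {
          final long gen0 = genInput.readLong();
          final long gen1 = genInput.readLong();
          if (gen0 == gen1)
            return gen0;   // consistent: trust this generation
        }
        return -1;         // unknown version or torn write
      } finally {
        genInput.close();
      }
    }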
Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentReader.java (revision 949485)
+++ lucene/src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -728,7 +728,7 @@
}
if (normsDirty) { // re-write norms
- si.setNumFields(core.fieldInfos.size());
+ si.initNormGen(core.fieldInfos.size());
for (final Norm norm : norms.values()) {
if (norm.dirty) {
norm.reWrite(si);
Index: lucene/src/java/org/apache/lucene/index/TermVectorsReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/TermVectorsReader.java (revision 949485)
+++ lucene/src/java/org/apache/lucene/index/TermVectorsReader.java (working copy)
@@ -29,11 +29,7 @@
// NOTE: if you make a new format, it must be larger than
// the current format
- static final int FORMAT_VERSION = 2;
- // Changes to speed up bulk merging of term vectors:
- static final int FORMAT_VERSION2 = 3;
-
// Changed strings to UTF8 with length-in-bytes not length-in-chars
static final int FORMAT_UTF8_LENGTH_IN_BYTES = 4;
@@ -87,13 +83,8 @@
assert format == tvdFormat;
assert format == tvfFormat;
- if (format >= FORMAT_VERSION2) {
- assert (tvx.length()-FORMAT_SIZE) % 16 == 0;
- numTotalDocs = (int) (tvx.length() >> 4);
- } else {
- assert (tvx.length()-FORMAT_SIZE) % 8 == 0;
- numTotalDocs = (int) (tvx.length() >> 3);
- }
+ assert (tvx.length()-FORMAT_SIZE) % 16 == 0;
+ numTotalDocs = (int) (tvx.length() >> 4);
if (-1 == docStoreOffset) {
this.docStoreOffset = 0;
@@ -133,11 +124,8 @@
return tvf;
}
- final private void seekTvx(final int docNum) throws IOException {
- if (format < FORMAT_VERSION2)
- tvx.seek((docNum + docStoreOffset) * 8L + FORMAT_SIZE);
- else
- tvx.seek((docNum + docStoreOffset) * 16L + FORMAT_SIZE);
+ private void seekTvx(final int docNum) throws IOException {
+ tvx.seek((docNum + docStoreOffset) * 16L + FORMAT_SIZE);
}
boolean canReadRawDocs() {
@@ -160,7 +148,7 @@
// SegmentMerger calls canReadRawDocs() first and should
// not call us if that returns false.
- if (format < FORMAT_VERSION2)
+ if (format < FORMAT_UTF8_LENGTH_IN_BYTES)
throw new IllegalStateException("cannot read raw docs with older term vector formats");
seekTvx(startDocID);
@@ -242,11 +230,7 @@
int number = 0;
int found = -1;
for (int i = 0; i < fieldCount; i++) {
- if (format >= FORMAT_VERSION)
- number = tvd.readVInt();
- else
- number += tvd.readVInt();
-
+ number = tvd.readVInt();
if (number == fieldNumber)
found = i;
}
@@ -255,11 +239,7 @@
// document
if (found != -1) {
// Compute position in the tvf file
- long position;
- if (format >= FORMAT_VERSION2)
- position = tvx.readLong();
- else
- position = tvd.readVLong();
+ long position = tvx.readLong();
for (int i = 1; i <= found; i++)
position += tvd.readVLong();
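[Annotation] This hunk depends on the 16-bytes-per-document tvx layout that the patch makes the only layout: per document, tvx appears to hold two absolute longs (a pointer into tvd and a pointer into tvf), and tvd holds VLong deltas between consecutive fields' tvf positions. After the field-number scan above has left tvd positioned at those deltas, locating field index `found` reduces to:

    // Sketch (names borrowed from the surrounding code):
    long position = tvx.readLong();  // tvx's second long for this doc: first field's tvf position
    for (int i = 1; i <= found; i++)
      position += tvd.readVLong();   // add the delta for each earlier field
    tvf.seek(position);              // positioned at the wanted field's term data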
@@ -292,16 +272,12 @@
// Reads the String[] fields; you have to pre-seek tvd to
// the right point
- final private String[] readFields(int fieldCount) throws IOException {
+ private String[] readFields(int fieldCount) throws IOException {
int number = 0;
String[] fields = new String[fieldCount];
for (int i = 0; i < fieldCount; i++) {
- if (format >= FORMAT_VERSION)
- number = tvd.readVInt();
- else
- number += tvd.readVInt();
-
+ number = tvd.readVInt();
fields[i] = fieldInfos.fieldName(number);
}
@@ -310,13 +286,9 @@
// Reads the long[] offsets into TVF; you have to pre-seek
// tvx/tvd to the right point
- final private long[] readTvfPointers(int fieldCount) throws IOException {
+ private long[] readTvfPointers(int fieldCount) throws IOException {
// Compute position in the tvf file
- long position;
- if (format >= FORMAT_VERSION2)
- position = tvx.readLong();
- else
- position = tvd.readVLong();
+ long position = tvx.readLong();
long[] tvfPointers = new long[fieldCount];
tvfPointers[0] = position;
@@ -425,32 +397,18 @@
boolean storePositions;
boolean storeOffsets;
- if (format >= FORMAT_VERSION){
- byte bits = tvf.readByte();
- storePositions = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
- storeOffsets = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
- }
- else{
- tvf.readVInt();
- storePositions = false;
- storeOffsets = false;
- }
+ byte bits = tvf.readByte();
+ storePositions = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
+ storeOffsets = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
+
mapper.setExpectations(field, numTerms, storeOffsets, storePositions);
int start = 0;
int deltaLength = 0;
int totalLength = 0;
byte[] byteBuffer;
- char[] charBuffer;
- final boolean preUTF8 = format < FORMAT_UTF8_LENGTH_IN_BYTES;
- // init the buffers
- if (preUTF8) {
- charBuffer = new char[10];
- byteBuffer = null;
- } else {
- charBuffer = null;
- byteBuffer = new byte[20];
- }
+ // init the buffer
+ byteBuffer = new byte[20];
for (int i = 0; i < numTerms; i++) {
start = tvf.readVInt();
@@ -459,26 +417,17 @@
final String term;
- if (preUTF8) {
- // Term stored as java chars
- if (charBuffer.length < totalLength) {
- charBuffer = ArrayUtil.grow(charBuffer, totalLength);
- }
- tvf.readChars(charBuffer, start, deltaLength);
- term = new String(charBuffer, 0, totalLength);
- } else {
- // Term stored as utf8 bytes
- if (byteBuffer.length < totalLength) {
- byteBuffer = ArrayUtil.grow(byteBuffer, totalLength);
- }
- tvf.readBytes(byteBuffer, start, deltaLength);
- term = new String(byteBuffer, 0, totalLength, "UTF-8");
+ // Term stored as utf8 bytes
+ if (byteBuffer.length < totalLength) {
+ byteBuffer = ArrayUtil.grow(byteBuffer, totalLength);
}
+ tvf.readBytes(byteBuffer, start, deltaLength);
+ term = new String(byteBuffer, 0, totalLength, "UTF-8");
int freq = tvf.readVInt();
int [] positions = null;
if (storePositions) { //read in the positions
//does the mapper even care about positions?
- if (mapper.isIgnoringPositions() == false) {
+ if (!mapper.isIgnoringPositions()) {
positions = new int[freq];
int prevPosition = 0;
for (int j = 0; j < freq; j++)
@@ -498,7 +447,7 @@
TermVectorOffsetInfo[] offsets = null;
if (storeOffsets) {
//does the mapper even care about offsets?
- if (mapper.isIgnoringOffsets() == false) {
+ if (!mapper.isIgnoringOffsets()) {
offsets = new TermVectorOffsetInfo[freq];
int prevOffset = 0;
for (int j = 0; j < freq; j++) {
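[Annotation] With the char-based path gone, every term in tvf is prefix-compressed UTF-8. Reassembled from the hunks above, the decode loop reduces to the following (the initial buffer size is arbitrary):

    // start       = number of leading bytes shared with the previous term
    // deltaLength = number of new suffix bytes to read
    byte[] byteBuffer = new byte[20];
    for (int i = 0; i < numTerms; i++) {
      final int start = tvf.readVInt();
      final int deltaLength = tvf.readVInt();
      final int totalLength = start + deltaLength;
      if (byteBuffer.length < totalLength)
        byteBuffer = ArrayUtil.grow(byteBuffer, totalLength);
      tvf.readBytes(byteBuffer, start, deltaLength);  // shared prefix stays in place
      final String term = new String(byteBuffer, 0, totalLength, "UTF-8");
      // ... then freq, positions, offsets, as in the remainder of the method ...
    }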
Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java (revision 949485)
+++ lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java (working copy)
@@ -36,9 +36,6 @@
long size;
long position = -1;
- /** The file format version, a negative number. */
- public static final int FORMAT = -3;
-
// Changed strings to true utf8 with length-in-bytes not
// length-in-chars
public static final int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = -4;
@@ -97,19 +94,11 @@
} else {
indexInterval = input.readInt();
skipInterval = input.readInt();
- if (format <= FORMAT) {
- // this new format introduces multi-level skipping
- maxSkipLevels = input.readInt();
- }
+ maxSkipLevels = input.readInt();
}
assert indexInterval > 0: "indexInterval=" + indexInterval + " is negative; must be > 0";
assert skipInterval > 0: "skipInterval=" + skipInterval + " is negative; must be > 0";
}
- if (format > FORMAT_VERSION_UTF8_LENGTH_IN_BYTES) {
- termBuffer.setPreUTF8Strings();
- scanBuffer.setPreUTF8Strings();
- prevBuffer.setPreUTF8Strings();
- }
}
@Override
Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java (revision 949485)
+++ lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java (working copy)
@@ -29,7 +29,6 @@
private String field;
private Term term; // cached
- private boolean preUTF8Strings; // true if strings are stored in modified UTF8 encoding (LUCENE-510)
private boolean dirty; // true if text was set externally (ie not read via UTF8 bytes)
private UnicodeUtil.UTF16Result text = new UnicodeUtil.UTF16Result();
@@ -42,8 +41,8 @@
return field.compareTo(other.field);
}
- private static final int compareChars(char[] chars1, int len1,
- char[] chars2, int len2) {
+ private static int compareChars(char[] chars1, int len1,
+ char[] chars2, int len2) {
final int end = len1 < len2 ? len1:len2;
for (int k = 0; k < end; k++) {
char c1 = chars1[k];
@@ -55,41 +54,28 @@
return len1 - len2;
}
- /** Call this if the IndexInput passed to {@link #read}
- * stores terms in the "modified UTF8" (pre LUCENE-510)
- * format. */
- void setPreUTF8Strings() {
- preUTF8Strings = true;
- }
-
public final void read(IndexInput input, FieldInfos fieldInfos)
throws IOException {
this.term = null; // invalidate cache
int start = input.readVInt();
int length = input.readVInt();
int totalLength = start + length;
- if (preUTF8Strings) {
- text.setLength(totalLength);
- input.readChars(text.result, start, length);
+ if (dirty) {
+ // Fully convert all bytes since bytes is dirty
+ UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
+ if (bytes.bytes.length < totalLength)
+ bytes.bytes = new byte[totalLength];
+ bytes.length = totalLength;
+ input.readBytes(bytes.bytes, start, length);
+ UnicodeUtil.UTF8toUTF16(bytes.bytes, 0, totalLength, text);
+ dirty = false;
} else {
-
- if (dirty) {
- // Fully convert all bytes since bytes is dirty
- UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
- if (bytes.bytes.length < totalLength)
- bytes.bytes = new byte[totalLength];
- bytes.length = totalLength;
- input.readBytes(bytes.bytes, start, length);
- UnicodeUtil.UTF8toUTF16(bytes.bytes, 0, totalLength, text);
- dirty = false;
- } else {
- // Incrementally convert only the UTF8 bytes that are new:
- if (bytes.bytes.length < totalLength)
- bytes.bytes = ArrayUtil.grow(bytes.bytes, totalLength);
- bytes.length = totalLength;
- input.readBytes(bytes.bytes, start, length);
- UnicodeUtil.UTF8toUTF16(bytes.bytes, start, length, text);
- }
+ // Incrementally convert only the UTF8 bytes that are new:
+ if (bytes.bytes.length < totalLength)
+ bytes.bytes = ArrayUtil.grow(bytes.bytes, totalLength);
+ bytes.length = totalLength;
+ input.readBytes(bytes.bytes, start, length);
+ UnicodeUtil.UTF8toUTF16(bytes.bytes, start, length, text);
}
this.field = fieldInfos.fieldName(input.readVInt());
}
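[Annotation] Because the hunk above interleaves two indentation levels, here is TermBuffer.read as it stands after the patch, reassembled from the surviving lines with comments added:

    public final void read(IndexInput input, FieldInfos fieldInfos) throws IOException {
      this.term = null;                    // invalidate cached Term
      final int start = input.readVInt();  // bytes shared with the previous term
      final int length = input.readVInt(); // new suffix bytes
      final int totalLength = start + length;
      if (dirty) {
        // text was set externally, so the byte buffer is stale: re-encode the
        // current text, then read the suffix and decode the whole term
        UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
        if (bytes.bytes.length < totalLength)
          bytes.bytes = new byte[totalLength];
        bytes.length = totalLength;
        input.readBytes(bytes.bytes, start, length);
        UnicodeUtil.UTF8toUTF16(bytes.bytes, 0, totalLength, text);
        dirty = false;
      } else {
        // fast path: the prefix is already decoded, so convert only the new bytes
        if (bytes.bytes.length < totalLength)
          bytes.bytes = ArrayUtil.grow(bytes.bytes, totalLength);
        bytes.length = totalLength;
        input.readBytes(bytes.bytes, start, length);
        UnicodeUtil.UTF8toUTF16(bytes.bytes, start, length, text);
      }
      this.field = fieldInfos.fieldName(input.readVInt());
    }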
Index: lucene/src/java/org/apache/lucene/store/DataInput.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/DataInput.java (revision 949485)
+++ lucene/src/java/org/apache/lucene/store/DataInput.java (working copy)
@@ -29,8 +29,6 @@
* data types.
*/
public abstract class DataInput implements Cloneable {
- private boolean preUTF8Strings; // true if we are reading old (modified UTF8) string format
-
/** Reads and returns a single byte.
* @see DataOutput#writeByte(byte)
*/
@@ -114,89 +112,16 @@
return i;
}
- /** Call this if readString should read characters stored
- * in the old modified UTF8 format (length in java chars
- * and java's modified UTF8 encoding). This is used for
- * indices written pre-2.4 See LUCENE-510 for details. */
- public void setModifiedUTF8StringsMode() {
- preUTF8Strings = true;
- }
-
/** Reads a string.
* @see DataOutput#writeString(String)
*/
public String readString() throws IOException {
- if (preUTF8Strings)
- return readModifiedUTF8String();
int length = readVInt();
final byte[] bytes = new byte[length];
readBytes(bytes, 0, length);
return new String(bytes, 0, length, "UTF-8");
}
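[Annotation] readString is now down to one wire format: a VInt byte count followed by that many UTF-8 bytes. A round-trip sketch against the matching DataOutput.writeString (the byte values match the "Lucene" fixture in TestIndexInput below):

    // Write side: VInt length-in-bytes, then the UTF-8 bytes.
    out.writeString("Lucene");               // emits 0x06 'L' 'u' 'c' 'e' 'n' 'e'
    // Read side, exactly as implemented above:
    final int length = in.readVInt();        // 6
    final byte[] bytes = new byte[length];
    in.readBytes(bytes, 0, length);
    final String s = new String(bytes, 0, length, "UTF-8");  // "Lucene"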
- private String readModifiedUTF8String() throws IOException {
- int length = readVInt();
- final char[] chars = new char[length];
- readChars(chars, 0, length);
- return new String(chars, 0, length);
- }
-
- /** Reads Lucene's old "modified UTF-8" encoded
- * characters into an array.
- * @param buffer the array to read characters into
- * @param start the offset in the array to start storing characters
- * @param length the number of characters to read
- * @see DataOutput#writeChars(String,int,int)
- * @deprecated -- please use readString or readBytes
- * instead, and construct the string
- * from those utf8 bytes
- */
- @Deprecated
- public void readChars(char[] buffer, int start, int length)
- throws IOException {
- final int end = start + length;
- for (int i = start; i < end; i++) {
- byte b = readByte();
- if ((b & 0x80) == 0)
- buffer[i] = (char)(b & 0x7F);
- else if ((b & 0xE0) != 0xE0) {
- buffer[i] = (char)(((b & 0x1F) << 6)
- | (readByte() & 0x3F));
- } else {
- buffer[i] = (char)(((b & 0x0F) << 12)
- | ((readByte() & 0x3F) << 6)
- | (readByte() & 0x3F));
- }
- }
- }
-
- /**
- * Expert
- *
- * Similar to {@link #readChars(char[], int, int)} but does not do any conversion operations on the bytes it is reading in. It still
- * has to invoke {@link #readByte()} just as {@link #readChars(char[], int, int)} does, but it does not need a buffer to store anything
- * and it does not have to do any of the bitwise operations, since we don't actually care what is in the byte except to determine
- * how many more bytes to read
- * @param length The number of chars to read
- * @deprecated this method operates on old "modified utf8" encoded
- * strings
- */
- @Deprecated
- public void skipChars(int length) throws IOException{
- for (int i = 0; i < length; i++) {
- byte b = readByte();
- if ((b & 0x80) == 0){
- //do nothing, we only need one byte
- } else if ((b & 0xE0) != 0xE0) {
- readByte();//read an additional byte
- } else {
- //read two additional bytes.
- readByte();
- readByte();
- }
- }
- }
-
/** Returns a clone of this stream.
*
* Clones of a stream access the same data, and are positioned at the same
Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -22,14 +22,11 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.Random;
import java.util.Enumeration;
import java.util.List;
-import java.util.ArrayList;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
@@ -37,8 +34,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.search.DocIdSetIterator;
@@ -50,13 +45,12 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.BytesRef;
/*
- Verify we can read the pre-2.1 file format, do searches
+ Verify we can read the pre-4.0 file format, do searches
against it, and add documents to it.
*/
@@ -128,94 +122,13 @@
}
*/
- final String[] oldNames = {"19.cfs",
- "19.nocfs",
- "20.cfs",
- "20.nocfs",
- "21.cfs",
- "21.nocfs",
- "22.cfs",
- "22.nocfs",
- "23.cfs",
- "23.nocfs",
- "24.cfs",
- "24.nocfs",
- "29.cfs",
- "29.nocfs",
- "30.cfs",
+ final String[] oldNames = {"30.cfs",
"30.nocfs",
"31.cfs",
"31.nocfs",
};
- private void assertCompressedFields29(Directory dir, boolean shouldStillBeCompressed) throws IOException {
- int count = 0;
- final int TEXT_PLAIN_LENGTH = TEXT_TO_COMPRESS.length() * 2;
- // FieldSelectorResult.SIZE returns 2*number_of_chars for String fields:
- final int BINARY_PLAIN_LENGTH = BINARY_TO_COMPRESS.length;
-
- IndexReader reader = IndexReader.open(dir, true);
- try {
- // look into sub readers and check if raw merge is on/off
- List<IndexReader> readers = new ArrayList<IndexReader>();
- ReaderUtil.gatherSubReaders(readers, reader);
- for (IndexReader ir : readers) {
- final FieldsReader fr = ((SegmentReader) ir).getFieldsReader();
- assertTrue("for a 2.9 index, FieldsReader.canReadRawDocs() must be false and other way round for a trunk index",
- shouldStillBeCompressed != fr.canReadRawDocs());
- }
-
- // test that decompression works correctly
- for(int i=0; i<reader.maxDoc(); i++) {
- if (!reader.isDeleted(i)) {
- final Document d = reader.document(i);
- final int actualSize = new DataInputStream(new ByteArrayInputStream(d.getFieldable("compressed").getBinaryValue())).readInt();
- final int compressedSize = Integer.parseInt(d.get("compressedSize"));
- final boolean binary = Integer.parseInt(d.get("id")) % 2 > 0;
- final int shouldSize = shouldStillBeCompressed ?
- compressedSize :
- (binary ? BINARY_PLAIN_LENGTH : TEXT_PLAIN_LENGTH);
- assertEquals("size incorrect", shouldSize, actualSize);
- if (!shouldStillBeCompressed) {
- assertFalse("uncompressed field should have another size than recorded in index", compressedSize == actualSize);
- }
- }
- }
- assertEquals("correct number of tests", 34 * 2, count);
- } finally {
- reader.close();
- }
- }
-
public void testOptimizeOldIndex() throws Exception {
- int hasTested29 = 0;
-
Random rand = newRandom();
for(int i=0;i<oldNames.length;i++) {
- // only test indexes >= 3.0
- if (oldNames[i].compareTo("30.") < 0) continue;
unzip(getDataFile("index." + oldNames[i] + ".zip"), oldNames[i]);
String fullPath = fullDir(oldNames[i]);
Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy)
@@ -281,7 +281,7 @@
final Directory dir = new MockRAMDirectory();
this.write(fieldInfos, dir, fields);
- final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, CodecProvider.getDefault().getWriter(null));
+ final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, -1, SEGMENT, false, true, CodecProvider.getDefault().getWriter(null));
si.setHasProx(false);
final FieldsProducer reader = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 64, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR));
@@ -319,7 +319,7 @@
final Directory dir = new MockRAMDirectory();
this.write(fieldInfos, dir, fields);
- final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, CodecProvider.getDefault().getWriter(null));
+ final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, -1, SEGMENT, false, true, CodecProvider.getDefault().getWriter(null));
final FieldsProducer terms = si.getCodec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 1024, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR));
Index: lucene/src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDoc.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestDoc.java (working copy)
@@ -194,8 +194,7 @@
merger.closeReaders();
final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
- useCompoundFile, true, -1, null, false, merger.hasProx(),
- merger.getCodec());
+ useCompoundFile, -1, null, false, merger.hasProx(), merger.getCodec());
if (useCompoundFile) {
List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
Index: lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (working copy)
@@ -134,9 +134,6 @@
// Create a bogus fnm file when the CFS already exists:
copyFile(dir, "_0.cfs", "_0.fnm");
- // Create a deletable file:
- copyFile(dir, "_0.cfs", "deletable");
-
// Create some old segments file:
copyFile(dir, "segments_2", "segments");
copyFile(dir, "segments_2", "segments_1");
Index: lucene/src/test/org/apache/lucene/index/TestIndexInput.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexInput.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestIndexInput.java (working copy)
@@ -79,44 +79,4 @@
assertEquals("\u0000",is.readString());
assertEquals("Lu\u0000ce\u0000ne",is.readString());
}
-
- /**
- * Expert
- *
- * @throws IOException
- */
- public void testSkipChars() throws IOException {
- byte[] bytes = new byte[]{(byte) 0x80, 0x01,
- (byte) 0xFF, 0x7F,
- (byte) 0x80, (byte) 0x80, 0x01,
- (byte) 0x81, (byte) 0x80, 0x01,
- 0x06, 'L', 'u', 'c', 'e', 'n', 'e',
- };
- String utf8Str = "\u0634\u1ea1";
- byte [] utf8Bytes = utf8Str.getBytes("UTF-8");
- byte [] theBytes = new byte[bytes.length + 1 + utf8Bytes.length];
- System.arraycopy(bytes, 0, theBytes, 0, bytes.length);
- theBytes[bytes.length] = (byte)utf8Str.length();//Add in the number of chars we are storing, which should fit in a byte for this test
- System.arraycopy(utf8Bytes, 0, theBytes, bytes.length + 1, utf8Bytes.length);
- IndexInput is = new MockIndexInput(theBytes);
- assertEquals(128, is.readVInt());
- assertEquals(16383, is.readVInt());
- assertEquals(16384, is.readVInt());
- assertEquals(16385, is.readVInt());
- int charsToRead = is.readVInt();//number of chars in the Lucene string
- assertTrue(0x06 + " does not equal: " + charsToRead, 0x06 == charsToRead);
- is.skipChars(3);
- char [] chars = new char[3];//there should be 6 chars remaining
- is.readChars(chars, 0, 3);
- String tmpStr = new String(chars);
- assertTrue(tmpStr + " is not equal to " + "ene", tmpStr.equals("ene" ) == true);
- //Now read the UTF8 stuff
- charsToRead = is.readVInt() - 1;//since we are skipping one
- is.skipChars(1);
- assertTrue(utf8Str.length() - 1 + " does not equal: " + charsToRead, utf8Str.length() - 1 == charsToRead);
- chars = new char[charsToRead];
- is.readChars(chars, 0, charsToRead);
- tmpStr = new String(chars);
- assertTrue(tmpStr + " is not equal to " + utf8Str.substring(1), tmpStr.equals(utf8Str.substring(1)) == true);
- }
}
Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 949485)
+++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy)
@@ -72,8 +72,8 @@
merger.closeReaders();
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
- SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, true,
- -1, null, false, merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null);
+ SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, -1,
+ null, false, merger.hasProx(), merger.getCodec()), BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, null);
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
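[Annotation] The tests above move to the narrower SegmentInfo constructor (the hasSingleNormFile flag is gone). The parameter roles, inferred from these call sites and annotated here for reference (the names are descriptive, not the authoritative signature):

    final SegmentInfo info = new SegmentInfo(
        mergedSegment,       // segment name
        docsMerged,          // document count
        mergedDir,           // directory holding the segment files
        false,               // isCompoundFile
        -1,                  // docStoreOffset (-1: segment owns its doc store)
        null,                // docStoreSegment (shared doc-store name, if any)
        false,               // docStoreIsCompoundFile
        merger.hasProx(),    // postings include positions/payloads
        merger.getCodec());  // codec that wrote the segment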
Index: lucene/src/test/org/apache/lucene/index/index.19.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.19.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.20.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.20.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.21.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.21.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.22.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.22.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.23.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.23.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.24.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.24.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.29.cfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Index: lucene/src/test/org/apache/lucene/index/index.29.nocfs.zip
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream