Index: CHANGES.txt
===================================================================
--- CHANGES.txt (revision 517305)
+++ CHANGES.txt (working copy)
@@ -20,6 +20,11 @@
classes, package-private again (they were unnecessarily made public
as part of LUCENE-701). (Mike McCandless)
+ 3. LUCENE-818: changed most public methods of IndexWriter,
+ IndexReader (and its subclasses), FieldsReader and RAMDirectory to
+ throw AlreadyClosedException if they are accessed after being
+ closed. (Mike McCandless)
+
Bug fixes
1. LUCENE-804: Fixed build.xml to pack a fully compilable src dist. (Doron Cohen)
Index: src/test/org/apache/lucene/index/TestMultiReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestMultiReader.java (revision 517305)
+++ src/test/org/apache/lucene/index/TestMultiReader.java (working copy)
@@ -85,15 +85,18 @@
assertEquals( 2, reader.numDocs() );
// Ensure undeleteAll survives commit/close/reopen:
- reader.commit();
reader.close();
sis.read(dir);
+
+ // Must re-open the readers from setUp():
+ readers[0] = SegmentReader.get(new SegmentInfo("seg-1", 1, dir));
+ readers[1] = SegmentReader.get(new SegmentInfo("seg-2", 1, dir));
+
reader = new MultiReader(dir, sis, false, readers);
assertEquals( 2, reader.numDocs() );
reader.deleteDocument(0);
assertEquals( 1, reader.numDocs() );
- reader.commit();
reader.close();
sis.read(dir);
reader = new MultiReader(dir, sis, false, readers);
Index: src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestFieldsReader.java (revision 517305)
+++ src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -23,6 +23,7 @@
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util._TestUtil;
import java.io.File;
@@ -133,6 +134,36 @@
}
}
+ public void testLazyFieldsAfterClose() throws Exception {
+ assertTrue(dir != null);
+ assertTrue(fieldInfos != null);
+ FieldsReader reader = new FieldsReader(dir, "test", fieldInfos);
+ assertTrue(reader != null);
+ assertTrue(reader.size() == 1);
+ Set loadFieldNames = new HashSet();
+ loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
+ loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
+ Set lazyFieldNames = new HashSet();
+ lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
+ lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
+ lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
+ lazyFieldNames.add(DocHelper.TEXT_FIELD_UTF2_KEY);
+ lazyFieldNames.add(DocHelper.COMPRESSED_TEXT_FIELD_2_KEY);
+ SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames);
+ Document doc = reader.doc(0, fieldSelector);
+ assertTrue("doc is null and it shouldn't be", doc != null);
+ Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY);
+ assertTrue("field is null and it shouldn't be", field != null);
+ assertTrue("field is not lazy and it should be", field.isLazy());
+ reader.close();
+ try {
+        field.stringValue();
+ fail("did not hit AlreadyClosedException as expected");
+ } catch (AlreadyClosedException e) {
+ // expected
+ }
+ }
+
public void testLoadFirst() throws Exception {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java (revision 517305)
+++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -26,6 +26,7 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -279,21 +280,21 @@
try {
reader.deleteDocument(4);
-      fail("deleteDocument after close failed to throw IOException");
+      fail("deleteDocument after close failed to throw AlreadyClosedException");
- } catch (IOException e) {
+ } catch (AlreadyClosedException e) {
// expected
}
try {
reader.setNorm(5, "aaa", 2.0f);
-      fail("setNorm after close failed to throw IOException");
+      fail("setNorm after close failed to throw AlreadyClosedException");
- } catch (IOException e) {
+ } catch (AlreadyClosedException e) {
// expected
}
try {
reader.undeleteAll();
-      fail("undeleteAll after close failed to throw IOException");
+      fail("undeleteAll after close failed to throw AlreadyClosedException");
- } catch (IOException e) {
+ } catch (AlreadyClosedException e) {
// expected
}
}
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 517305)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -19,6 +19,7 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.LockFactory;
@@ -654,6 +655,25 @@
}
}
+ public void testChangesAfterClose() throws IOException {
+ Directory dir = new RAMDirectory();
+
+ IndexWriter writer = null;
+
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
+ addDoc(writer);
+
+ // close
+ writer.close();
+ try {
+ addDoc(writer);
+ fail("did not hit AlreadyClosedException");
+ } catch (AlreadyClosedException e) {
+ // expected
+ }
+ }
+
+
// Simulate a corrupt index by removing one of the cfs
// files and make sure we get an IOException trying to
// open the index:
@@ -723,7 +743,6 @@
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.search(new TermQuery(searchTerm));
assertEquals("did not get right number of hits", 100, hits.length());
- writer.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
writer.close();
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java (revision 517305)
+++ src/java/org/apache/lucene/index/MultiReader.java (working copy)
@@ -72,24 +72,21 @@
}
- /** Return an array of term frequency vectors for the specified document.
- * The array contains a vector for each vectorized field in the document.
- * Each vector vector contains term numbers and frequencies for all terms
- * in a given vectorized field.
- * If no such fields existed, the method returns null.
- */
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
+ ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
+ ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
public synchronized int numDocs() {
+ // Don't call ensureOpen() here (it could affect performance)
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
@@ -100,21 +97,27 @@
}
public int maxDoc() {
+ // Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+ ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
public boolean isDeleted(int n) {
+ // Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
- public boolean hasDeletions() { return hasDeletions; }
+ public boolean hasDeletions() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return hasDeletions;
+ }
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
@@ -153,6 +156,7 @@
}
public boolean hasNorms(String field) throws IOException {
+ ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
@@ -166,6 +170,7 @@
}
public synchronized byte[] norms(String field) throws IOException {
+ ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
@@ -181,6 +186,7 @@
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
+ ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
if (bytes != null) // cache hit
@@ -198,14 +204,17 @@
}
public TermEnum terms() throws IOException {
+ ensureOpen();
return new MultiTermEnum(subReaders, starts, null);
}
public TermEnum terms(Term term) throws IOException {
+ ensureOpen();
return new MultiTermEnum(subReaders, starts, term);
}
public int docFreq(Term t) throws IOException {
+ ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
@@ -213,10 +222,12 @@
}
public TermDocs termDocs() throws IOException {
+ ensureOpen();
return new MultiTermDocs(subReaders, starts);
}
public TermPositions termPositions() throws IOException {
+ ensureOpen();
return new MultiTermPositions(subReaders, starts);
}
@@ -251,11 +262,9 @@
subReaders[i].close();
}
- /**
- * @see IndexReader#getFieldNames(IndexReader.FieldOption)
- */
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
// maintain a unique set of field names
+ ensureOpen();
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];
Index: src/java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsReader.java (revision 517305)
+++ src/java/org/apache/lucene/index/FieldsReader.java (working copy)
@@ -20,6 +20,7 @@
import org.apache.lucene.document.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.AlreadyClosedException;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -46,6 +47,7 @@
private final IndexInput indexStream;
private int size;
+ private boolean closed;
private ThreadLocal fieldsStreamTL = new ThreadLocal();
@@ -59,19 +61,31 @@
}
/**
+ * @throws AlreadyClosedException if this FieldsReader is closed
+ */
+ protected final void ensureOpen() throws AlreadyClosedException {
+ if (closed) {
+ throw new AlreadyClosedException("this FieldsReader is closed");
+ }
+ }
+
+ /**
* Closes the underlying {@link org.apache.lucene.store.IndexInput} streams, including any ones associated with a
* lazy implementation of a Field. This means that the Fields values will not be accessible.
*
* @throws IOException
*/
final void close() throws IOException {
- fieldsStream.close();
- cloneableFieldsStream.close();
- indexStream.close();
- IndexInput localFieldsStream = (IndexInput) fieldsStreamTL.get();
- if (localFieldsStream != null) {
- localFieldsStream.close();
- fieldsStreamTL.set(null);
+ if (!closed) {
+ fieldsStream.close();
+ cloneableFieldsStream.close();
+ indexStream.close();
+ IndexInput localFieldsStream = (IndexInput) fieldsStreamTL.get();
+ if (localFieldsStream != null) {
+ localFieldsStream.close();
+ fieldsStreamTL.set(null);
+ }
+ closed = true;
}
}
@@ -323,6 +337,7 @@
* binaryValue() must be set.
*/
public byte[] binaryValue() {
+ ensureOpen();
if (fieldsData == null) {
final byte[] b = new byte[toRead];
IndexInput localFieldsStream = getFieldStream();
@@ -349,6 +364,7 @@
* and binaryValue() must be set.
*/
public Reader readerValue() {
+ ensureOpen();
return fieldsData instanceof Reader ? (Reader) fieldsData : null;
}
@@ -358,6 +374,7 @@
* binaryValue() must be set.
*/
public String stringValue() {
+ ensureOpen();
if (fieldsData == null) {
IndexInput localFieldsStream = getFieldStream();
try {
@@ -380,18 +397,22 @@
}
public long getPointer() {
+ ensureOpen();
return pointer;
}
public void setPointer(long pointer) {
+ ensureOpen();
this.pointer = pointer;
}
public int getToRead() {
+ ensureOpen();
return toRead;
}
public void setToRead(int toRead) {
+ ensureOpen();
this.toRead = toRead;
}
}
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java (revision 517305)
+++ src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.AlreadyClosedException;
import java.io.File;
import java.io.FileOutputStream;
@@ -115,8 +116,17 @@
private boolean directoryOwner;
private boolean closeDirectory;
protected IndexFileDeleter deleter;
- private boolean isClosed;
+ private boolean closed;
+ /**
+ * @throws AlreadyClosedException if this IndexReader is closed
+ */
+ protected final void ensureOpen() throws AlreadyClosedException {
+ if (closed) {
+ throw new AlreadyClosedException("this IndexReader is closed");
+ }
+ }
+
private SegmentInfos segmentInfos;
private Lock writeLock;
private boolean stale;
@@ -190,8 +200,12 @@
}.run();
}
- /** Returns the directory this index resides in. */
- public Directory directory() { return directory; }
+ /** Returns the directory this index resides in.
+ */
+ public Directory directory() {
+ ensureOpen();
+ return directory;
+ }
/**
* Returns the time the index in the named directory was last modified.
@@ -283,6 +297,7 @@
* Version number when this IndexReader was opened.
*/
public long getVersion() {
+ ensureOpen();
return segmentInfos.getVersion();
}
@@ -295,6 +310,7 @@
* @throws IOException if there is a low-level IO error
*/
public boolean isCurrent() throws CorruptIndexException, IOException {
+ ensureOpen();
return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
}
@@ -303,7 +319,8 @@
* @return true if the index is optimized; false otherwise
*/
public boolean isOptimized() {
- return segmentInfos.size() == 1 && hasDeletions() == false;
+ ensureOpen();
+ return segmentInfos.size() == 1 && hasDeletions() == false;
}
/**
@@ -389,6 +406,7 @@
* @throws IOException if there is a low-level IO error
*/
public Document document(int n) throws CorruptIndexException, IOException {
+ ensureOpen();
return document(n, null);
}
@@ -427,6 +445,7 @@
public boolean hasNorms(String field) throws IOException {
// backward compatible implementation.
// SegmentReader has an efficient implementation.
+ ensureOpen();
return norms(field) != null;
}
@@ -459,11 +478,11 @@
* @throws LockObtainFailedException if another writer
* has this index open (write.lock could not
* be obtained)
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public final synchronized void setNorm(int doc, String field, byte value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@@ -486,27 +505,31 @@
* @throws LockObtainFailedException if another writer
* has this index open (write.lock could not
* be obtained)
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public void setNorm(int doc, String field, float value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
setNorm(doc, field, Similarity.encodeNorm(value));
}
/** Returns an enumeration of all the terms in the index.
* The enumeration is ordered by Term.compareTo(). Each term
* is greater than all that precede it in the enumeration.
+ * @throws IOException if there is a low-level IO error
*/
public abstract TermEnum terms() throws IOException;
/** Returns an enumeration of all terms after a given term.
* The enumeration is ordered by Term.compareTo(). Each term
* is greater than all that precede it in the enumeration.
+ * @throws IOException if there is a low-level IO error
*/
public abstract TermEnum terms(Term t) throws IOException;
- /** Returns the number of documents containing the term t. */
+ /** Returns the number of documents containing the term t.
+ * @throws IOException if there is a low-level IO error
+ */
public abstract int docFreq(Term t) throws IOException;
/** Returns an enumeration of all the documents which contain
@@ -518,14 +541,18 @@
*
*
The enumeration is ordered by document number. Each document number * is greater than all that precede it in the enumeration. + * @throws IOException if there is a low-level IO error */ public TermDocs termDocs(Term term) throws IOException { + ensureOpen(); TermDocs termDocs = termDocs(); termDocs.seek(term); return termDocs; } - /** Returns an unpositioned {@link TermDocs} enumerator. */ + /** Returns an unpositioned {@link TermDocs} enumerator. + * @throws IOException if there is a low-level IO error + */ public abstract TermDocs termDocs() throws IOException; /** Returns an enumeration of all the documents which contain @@ -543,14 +570,18 @@ *
This positional information faciliates phrase and proximity searching. *
The enumeration is ordered by document number. Each document number is
* greater than all that precede it in the enumeration.
+ * @throws IOException if there is a low-level IO error
*/
public TermPositions termPositions(Term term) throws IOException {
+ ensureOpen();
TermPositions termPositions = termPositions();
termPositions.seek(term);
return termPositions;
}
- /** Returns an unpositioned {@link TermPositions} enumerator. */
+ /** Returns an unpositioned {@link TermPositions} enumerator.
+ * @throws IOException if there is a low-level IO error
+ */
public abstract TermPositions termPositions() throws IOException;
/**
@@ -566,10 +597,9 @@
* @throws IOException if there is a low-level IO error
*/
private void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
if (stale)
throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
- if (isClosed)
- throw new IOException("this reader is closed");
if (writeLock == null) {
Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
@@ -602,10 +632,10 @@
* @throws LockObtainFailedException if another writer
* has this index open (write.lock could not
* be obtained)
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public final synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@@ -634,10 +664,10 @@
* @throws LockObtainFailedException if another writer
* has this index open (write.lock could not
* be obtained)
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public final int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
TermDocs docs = termDocs(term);
if (docs == null) return 0;
int n = 0;
@@ -660,10 +690,10 @@
* has this index open (write.lock could not
* be obtained)
* @throws CorruptIndexException if the index is corrupt
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public final synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+ ensureOpen();
if(directoryOwner)
acquireWriteLock();
hasChanges = true;
@@ -793,20 +823,17 @@
* Closes files associated with this index.
* Also saves any new deletions to disk.
* No other methods should be called after this has been called.
- * @throws IOException if this reader was closed already
- * or there is a low-level IO error
+ * @throws IOException if there is a low-level IO error
*/
public final synchronized void close() throws IOException {
- if (directoryOwner && isClosed) {
- throw new IOException("this reader is already closed");
+ if (!closed) {
+ commit();
+ doClose();
+ if (directoryOwner)
+ closed = true;
+ if(closeDirectory)
+ directory.close();
}
- commit();
- doClose();
- if(closeDirectory)
- directory.close();
- if (directoryOwner) {
- isClosed = true;
- }
}
/** Implements close. */
Index: src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 517305)
+++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy)
@@ -92,43 +92,84 @@
public TermFreqVector[] getTermFreqVectors(int docNumber)
throws IOException {
+ ensureOpen();
return in.getTermFreqVectors(docNumber);
}
public TermFreqVector getTermFreqVector(int docNumber, String field)
throws IOException {
+ ensureOpen();
return in.getTermFreqVector(docNumber, field);
}
- public int numDocs() { return in.numDocs(); }
- public int maxDoc() { return in.maxDoc(); }
+ public int numDocs() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.numDocs();
+ }
- public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { return in.document(n, fieldSelector); }
+ public int maxDoc() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.maxDoc();
+ }
- public boolean isDeleted(int n) { return in.isDeleted(n); }
- public boolean hasDeletions() { return in.hasDeletions(); }
+ public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+ ensureOpen();
+ return in.document(n, fieldSelector);
+ }
+
+ public boolean isDeleted(int n) {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.isDeleted(n);
+ }
+
+ public boolean hasDeletions() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.hasDeletions();
+ }
+
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
public boolean hasNorms(String field) throws IOException {
+ ensureOpen();
return in.hasNorms(field);
}
- public byte[] norms(String f) throws IOException { return in.norms(f); }
+ public byte[] norms(String f) throws IOException {
+ ensureOpen();
+ return in.norms(f);
+ }
+
public void norms(String f, byte[] bytes, int offset) throws IOException {
+ ensureOpen();
in.norms(f, bytes, offset);
}
+
protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
in.setNorm(d, f, b);
}
- public TermEnum terms() throws IOException { return in.terms(); }
- public TermEnum terms(Term t) throws IOException { return in.terms(t); }
+ public TermEnum terms() throws IOException {
+ ensureOpen();
+ return in.terms();
+ }
- public int docFreq(Term t) throws IOException { return in.docFreq(t); }
+ public TermEnum terms(Term t) throws IOException {
+ ensureOpen();
+ return in.terms(t);
+ }
- public TermDocs termDocs() throws IOException { return in.termDocs(); }
+ public int docFreq(Term t) throws IOException {
+ ensureOpen();
+ return in.docFreq(t);
+ }
+ public TermDocs termDocs() throws IOException {
+ ensureOpen();
+ return in.termDocs();
+ }
+
public TermPositions termPositions() throws IOException {
+ ensureOpen();
return in.termPositions();
}
@@ -138,9 +179,17 @@
public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
+ ensureOpen();
return in.getFieldNames(fieldNames);
}
- public long getVersion() { return in.getVersion(); }
- public boolean isCurrent() throws CorruptIndexException, IOException { return in.isCurrent(); }
+ public long getVersion() {
+ ensureOpen();
+ return in.getVersion();
+ }
+
+ public boolean isCurrent() throws CorruptIndexException, IOException {
+ ensureOpen();
+ return in.isCurrent();
+ }
}
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 517305)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -24,6 +24,7 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.RAMDirectory;
import java.io.File;
@@ -151,13 +152,24 @@
private boolean useCompoundFile = true;
private boolean closeDir;
+ private boolean closed;
+ /**
+ * @throws AlreadyClosedException if this IndexWriter is closed
+ */
+ protected final void ensureOpen() throws AlreadyClosedException {
+ if (closed) {
+ throw new AlreadyClosedException("this IndexWriter is closed");
+ }
+ }
+
/** Get the current setting of whether to use the compound file format.
* Note that this just returns the value you set with setUseCompoundFile(boolean)
* or the default. You cannot use this to query the status of an existing index.
* @see #setUseCompoundFile(boolean)
*/
public boolean getUseCompoundFile() {
+ ensureOpen();
return useCompoundFile;
}
@@ -166,6 +178,7 @@
* is finished. This is done regardless of what directory is in use.
*/
public void setUseCompoundFile(boolean value) {
+ ensureOpen();
useCompoundFile = value;
}
@@ -174,6 +187,7 @@
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
+ ensureOpen();
this.similarity = similarity;
}
@@ -182,6 +196,7 @@
*
This defaults to the current value of {@link Similarity#getDefault()}.
*/
public Similarity getSimilarity() {
+ ensureOpen();
return this.similarity;
}
@@ -207,6 +222,7 @@
* @see #DEFAULT_TERM_INDEX_INTERVAL
*/
public void setTermIndexInterval(int interval) {
+ ensureOpen();
this.termIndexInterval = interval;
}
@@ -214,7 +230,10 @@
*
* @see #setTermIndexInterval(int)
*/
- public int getTermIndexInterval() { return termIndexInterval; }
+ public int getTermIndexInterval() {
+ ensureOpen();
+ return termIndexInterval;
+ }
/**
* Constructs an IndexWriter for the index in path.
@@ -432,6 +451,7 @@
*
The default value is {@link Integer#MAX_VALUE}. */ public void setMaxMergeDocs(int maxMergeDocs) { + ensureOpen(); this.maxMergeDocs = maxMergeDocs; } @@ -439,6 +459,7 @@ * @see #setMaxMergeDocs */ public int getMaxMergeDocs() { + ensureOpen(); return maxMergeDocs; } @@ -455,6 +476,7 @@ * By default, no more than 10,000 terms will be indexed for a field. */ public void setMaxFieldLength(int maxFieldLength) { + ensureOpen(); this.maxFieldLength = maxFieldLength; } @@ -462,6 +484,7 @@ * @see #setMaxFieldLength */ public int getMaxFieldLength() { + ensureOpen(); return maxFieldLength; } @@ -476,6 +499,7 @@ * @throws IllegalArgumentException if maxBufferedDocs is smaller than 2 */ public void setMaxBufferedDocs(int maxBufferedDocs) { + ensureOpen(); if (maxBufferedDocs < 2) throw new IllegalArgumentException("maxBufferedDocs must at least be 2"); this.minMergeDocs = maxBufferedDocs; @@ -485,6 +509,7 @@ * @see #setMaxBufferedDocs */ public int getMaxBufferedDocs() { + ensureOpen(); return minMergeDocs; } @@ -498,6 +523,7 @@ * @throws IllegalArgumentException if maxBufferedDeleteTerms is smaller than 1
*/ public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) { + ensureOpen(); if (maxBufferedDeleteTerms < 1) throw new IllegalArgumentException("maxBufferedDeleteTerms must at least be 1"); this.maxBufferedDeleteTerms = maxBufferedDeleteTerms; @@ -507,6 +533,7 @@ * @see #setMaxBufferedDeleteTerms */ public int getMaxBufferedDeleteTerms() { + ensureOpen(); return maxBufferedDeleteTerms; } @@ -521,6 +548,7 @@ *This must never be less than 2. The default value is 10. */ public void setMergeFactor(int mergeFactor) { + ensureOpen(); if (mergeFactor < 2) throw new IllegalArgumentException("mergeFactor cannot be less than 2"); this.mergeFactor = mergeFactor; @@ -530,6 +558,7 @@ * @see #setMergeFactor */ public int getMergeFactor() { + ensureOpen(); return mergeFactor; } @@ -537,6 +566,7 @@ * maxFieldLength is reached will be printed to this. */ public void setInfoStream(PrintStream infoStream) { + ensureOpen(); this.infoStream = infoStream; } @@ -544,6 +574,7 @@ * @see #setInfoStream */ public PrintStream getInfoStream() { + ensureOpen(); return infoStream; } @@ -552,6 +583,7 @@ * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter. 
*/ public void setWriteLockTimeout(long writeLockTimeout) { + ensureOpen(); this.writeLockTimeout = writeLockTimeout; } @@ -559,6 +591,7 @@ * @see #setWriteLockTimeout */ public long getWriteLockTimeout() { + ensureOpen(); return writeLockTimeout; } @@ -612,14 +645,17 @@ * @throws IOException if there is a low-level IO error */ public synchronized void close() throws CorruptIndexException, IOException { - flushRamSegments(); - ramDirectory.close(); - if (writeLock != null) { - writeLock.release(); // release write lock - writeLock = null; + if (!closed) { + flushRamSegments(); + ramDirectory.close(); + if (writeLock != null) { + writeLock.release(); // release write lock + writeLock = null; + } + closed = true; + if(closeDir) + directory.close(); } - if(closeDir) - directory.close(); } /** Release the write lock, if needed. */ @@ -636,17 +672,20 @@ /** Returns the Directory used by this index. */ public Directory getDirectory() { - return directory; + ensureOpen(); + return directory; } /** Returns the analyzer used by this index. */ public Analyzer getAnalyzer() { - return analyzer; + ensureOpen(); + return analyzer; } /** Returns the number of documents currently in this index. 
*/ public synchronized int docCount() { + ensureOpen(); int count = ramSegmentInfos.size(); for (int i = 0; i < segmentInfos.size(); i++) { SegmentInfo si = segmentInfos.info(i); @@ -724,6 +763,7 @@ * @throws IOException if there is a low-level IO error */ public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException { + ensureOpen(); SegmentInfo newSegmentInfo = buildSingleDocSegment(doc, analyzer); synchronized (this) { ramSegmentInfos.addElement(newSegmentInfo); @@ -747,6 +787,7 @@ * @throws IOException if there is a low-level IO error */ public synchronized void deleteDocuments(Term term) throws CorruptIndexException, IOException { + ensureOpen(); bufferDeleteTerm(term); maybeFlushRamSegments(); } @@ -760,6 +801,7 @@ * @throws IOException if there is a low-level IO error */ public synchronized void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException { + ensureOpen(); for (int i = 0; i < terms.length; i++) { bufferDeleteTerm(terms[i]); } @@ -779,6 +821,7 @@ * @throws IOException if there is a low-level IO error */ public void updateDocument(Term term, Document doc) throws CorruptIndexException, IOException { + ensureOpen(); updateDocument(term, doc, getAnalyzer()); } @@ -797,6 +840,7 @@ */ public void updateDocument(Term term, Document doc, Analyzer analyzer) throws CorruptIndexException, IOException { + ensureOpen(); SegmentInfo newSegmentInfo = buildSingleDocSegment(doc, analyzer); synchronized (this) { bufferDeleteTerm(term); @@ -931,6 +975,7 @@ * @throws IOException if there is a low-level IO error */ public synchronized void optimize() throws CorruptIndexException, IOException { + ensureOpen(); flushRamSegments(); while (segmentInfos.size() > 1 || (segmentInfos.size() == 1 && @@ -1072,6 +1117,7 @@ public synchronized void addIndexes(Directory[] dirs) throws CorruptIndexException, IOException { + ensureOpen(); optimize(); // start with zero or 1 seg int start = segmentInfos.size(); @@ -1157,6 +1203,7 
@@ // 1 flush ram segments + ensureOpen(); flushRamSegments(); // 2 copy segment infos and find the highest level from dirs @@ -1261,6 +1308,7 @@ public synchronized void addIndexes(IndexReader[] readers) throws CorruptIndexException, IOException { + ensureOpen(); optimize(); // start with zero or 1 seg final String mergedName = newSegmentName(); @@ -1409,6 +1457,7 @@ * @throws IOException if there is a low-level IO error */ public final synchronized void flush() throws CorruptIndexException, IOException { + ensureOpen(); flushRamSegments(); } @@ -1416,6 +1465,7 @@ * Useful for size management with flushRamDocs() */ public final long ramSizeInBytes() { + ensureOpen(); return ramDirectory.sizeInBytes(); } @@ -1423,6 +1473,7 @@ * Useful when calling flushRamSegments() */ public final synchronized int numRamDocs() { + ensureOpen(); return ramSegmentInfos.size(); } Index: src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- src/java/org/apache/lucene/index/ParallelReader.java (revision 517305) +++ src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -66,8 +66,11 @@ /** Construct a ParallelReader. */ public ParallelReader() throws IOException { super(null); } - /** Add an IndexReader. */ + /** Add an IndexReader. 
+ * @throws IOException if there is a low-level IO error + */ public void add(IndexReader reader) throws IOException { + ensureOpen(); add(reader, false); } @@ -79,10 +82,12 @@ * of documents * @throws IllegalArgumentException if not all indexes have the same value * of {@link IndexReader#maxDoc()} + * @throws IOException if there is a low-level IO error */ public void add(IndexReader reader, boolean ignoreStoredFields) throws IOException { + ensureOpen(); if (readers.size() == 0) { this.maxDoc = reader.maxDoc(); this.numDocs = reader.numDocs(); @@ -110,14 +115,24 @@ readers.add(reader); } - public int numDocs() { return numDocs; } + public int numDocs() { + // Don't call ensureOpen() here (it could affect performance) + return numDocs; + } - public int maxDoc() { return maxDoc; } + public int maxDoc() { + // Don't call ensureOpen() here (it could affect performance) + return maxDoc; + } - public boolean hasDeletions() { return hasDeletions; } + public boolean hasDeletions() { + // Don't call ensureOpen() here (it could affect performance) + return hasDeletions; + } // check first reader public boolean isDeleted(int n) { + // Don't call ensureOpen() here (it could affect performance) if (readers.size() > 0) return ((IndexReader)readers.get(0)).isDeleted(n); return false; @@ -141,6 +156,7 @@ // append fields from storedFieldReaders public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { + ensureOpen(); Document result = new Document(); for (int i = 0; i < storedFieldReaders.size(); i++) { IndexReader reader = (IndexReader)storedFieldReaders.get(i); @@ -166,6 +182,7 @@ // get all vectors public TermFreqVector[] getTermFreqVectors(int n) throws IOException { + ensureOpen(); ArrayList results = new ArrayList(); Iterator i = fieldToReader.entrySet().iterator(); while (i.hasNext()) { @@ -182,22 +199,26 @@ public TermFreqVector getTermFreqVector(int n, String field) throws IOException { + ensureOpen(); IndexReader reader = 
((IndexReader)fieldToReader.get(field)); return reader==null ? null : reader.getTermFreqVector(n, field); } public boolean hasNorms(String field) throws IOException { + ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); return reader==null ? false : reader.hasNorms(field); } public byte[] norms(String field) throws IOException { + ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); return reader==null ? null : reader.norms(field); } public void norms(String field, byte[] result, int offset) throws IOException { + ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); if (reader!=null) reader.norms(field, result, offset); @@ -211,31 +232,38 @@ } public TermEnum terms() throws IOException { + ensureOpen(); return new ParallelTermEnum(); } public TermEnum terms(Term term) throws IOException { + ensureOpen(); return new ParallelTermEnum(term); } public int docFreq(Term term) throws IOException { + ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(term.field())); return reader==null ? 
0 : reader.docFreq(term); } public TermDocs termDocs(Term term) throws IOException { + ensureOpen(); return new ParallelTermDocs(term); } public TermDocs termDocs() throws IOException { + ensureOpen(); return new ParallelTermDocs(); } public TermPositions termPositions(Term term) throws IOException { + ensureOpen(); return new ParallelTermPositions(term); } public TermPositions termPositions() throws IOException { + ensureOpen(); return new ParallelTermPositions(); } @@ -251,6 +279,7 @@ public Collection getFieldNames (IndexReader.FieldOption fieldNames) { + ensureOpen(); Set fieldSet = new HashSet(); for (int i = 0; i < readers.size(); i++) { IndexReader reader = ((IndexReader)readers.get(i)); Index: src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- src/java/org/apache/lucene/index/SegmentReader.java (revision 517305) +++ src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -290,10 +290,12 @@ } static boolean hasDeletions(SegmentInfo si) throws IOException { + // Don't call ensureOpen() here (it could affect performance) return si.hasDeletions(); } public boolean hasDeletions() { + // Don't call ensureOpen() here (it could affect performance) return deletedDocs != null; } @@ -354,10 +356,12 @@ } public TermEnum terms() { + ensureOpen(); return tis.terms(); } public TermEnum terms(Term t) throws IOException { + ensureOpen(); return tis.terms(t); } @@ -366,6 +370,7 @@ * @throws IOException if there is a low-level IO error */ public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { + ensureOpen(); if (isDeleted(n)) throw new IllegalArgumentException ("attempt to access a deleted document"); @@ -377,14 +382,17 @@ } public TermDocs termDocs() throws IOException { + ensureOpen(); return new SegmentTermDocs(this); } public TermPositions termPositions() throws IOException { + ensureOpen(); return new 
SegmentTermPositions(this); } public int docFreq(Term t) throws IOException { + ensureOpen(); TermInfo ti = tis.get(t); if (ti != null) return ti.docFreq; @@ -393,6 +401,7 @@ } public int numDocs() { + // Don't call ensureOpen() here (it could affect performance) int n = maxDoc(); if (deletedDocs != null) n -= deletedDocs.count(); @@ -400,6 +409,7 @@ } public int maxDoc() { + // Don't call ensureOpen() here (it could affect performance) return si.docCount; } @@ -407,6 +417,7 @@ * @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption) */ public Collection getFieldNames(IndexReader.FieldOption fieldOption) { + ensureOpen(); Set fieldSet = new HashSet(); for (int i = 0; i < fieldInfos.size(); i++) { @@ -448,6 +459,7 @@ public synchronized boolean hasNorms(String field) { + ensureOpen(); return norms.containsKey(field); } @@ -480,6 +492,7 @@ // returns fake norms if norms aren't available public synchronized byte[] norms(String field) throws IOException { + ensureOpen(); byte[] bytes = getNorms(field); if (bytes==null) bytes=fakeNorms(); return bytes; @@ -501,6 +514,7 @@ public synchronized void norms(String field, byte[] bytes, int offset) throws IOException { + ensureOpen(); Norm norm = (Norm) norms.get(field); if (norm == null) { System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc()); @@ -591,6 +605,7 @@ */ public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException { // Check if this field is invalid or has no stored term vector + ensureOpen(); FieldInfo fi = fieldInfos.fieldInfo(field); if (fi == null || !fi.storeTermVector || termVectorsReaderOrig == null) return null; @@ -611,6 +626,7 @@ * @throws IOException */ public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException { + ensureOpen(); if (termVectorsReaderOrig == null) return null; Index: src/java/org/apache/lucene/store/RAMDirectory.java =================================================================== --- 
src/java/org/apache/lucene/store/RAMDirectory.java (revision 517305) +++ src/java/org/apache/lucene/store/RAMDirectory.java (working copy) @@ -97,6 +97,7 @@ /** Returns an array of strings, one for each file in the directory. */ public synchronized final String[] list() { + ensureOpen(); Set fileNames = fileMap.keySet(); String[] result = new String[fileNames.size()]; int i = 0; @@ -108,6 +109,7 @@ /** Returns true iff the named file exists in this directory. */ public final boolean fileExists(String name) { + ensureOpen(); RAMFile file; synchronized (this) { file = (RAMFile)fileMap.get(name); @@ -119,6 +121,7 @@ * @throws IOException if the file does not exist */ public final long fileModified(String name) throws IOException { + ensureOpen(); RAMFile file; synchronized (this) { file = (RAMFile)fileMap.get(name); @@ -132,6 +135,7 @@ * @throws IOException if the file does not exist */ public void touchFile(String name) throws IOException { + ensureOpen(); RAMFile file; synchronized (this) { file = (RAMFile)fileMap.get(name); @@ -154,6 +158,7 @@ * @throws IOException if the file does not exist */ public final long fileLength(String name) throws IOException { + ensureOpen(); RAMFile file; synchronized (this) { file = (RAMFile)fileMap.get(name); @@ -167,6 +172,7 @@ * directory. This is currently quantized to * BufferedIndexOutput.BUFFER_SIZE. 
*/ public synchronized final long sizeInBytes() { + ensureOpen(); return sizeInBytes; } @@ -174,6 +180,7 @@ * @throws IOException if the file does not exist */ public synchronized void deleteFile(String name) throws IOException { + ensureOpen(); RAMFile file = (RAMFile)fileMap.get(name); if (file!=null) { fileMap.remove(name); @@ -188,6 +195,7 @@ * @deprecated */ public synchronized final void renameFile(String from, String to) throws IOException { + ensureOpen(); RAMFile fromFile = (RAMFile)fileMap.get(from); if (fromFile==null) throw new FileNotFoundException(from); @@ -202,6 +210,7 @@ /** Creates a new, empty file in the directory with the given name. Returns a stream writing this file. */ public IndexOutput createOutput(String name) { + ensureOpen(); RAMFile file = new RAMFile(this); synchronized (this) { RAMFile existing = (RAMFile)fileMap.get(name); @@ -216,6 +225,7 @@ /** Returns a stream reading an existing file. */ public IndexInput openInput(String name) throws IOException { + ensureOpen(); RAMFile file; synchronized (this) { file = (RAMFile)fileMap.get(name); @@ -230,4 +240,12 @@ fileMap = null; } + /** + * @throws AlreadyClosedException if this RAMDirectory is closed + */ + protected final void ensureOpen() throws AlreadyClosedException { + if (fileMap == null) { + throw new AlreadyClosedException("this RAMDirectory is closed"); + } + } } Index: src/java/org/apache/lucene/store/AlreadyClosedException.java =================================================================== --- src/java/org/apache/lucene/store/AlreadyClosedException.java (revision 0) +++ src/java/org/apache/lucene/store/AlreadyClosedException.java (revision 0) @@ -0,0 +1,28 @@ +package org.apache.lucene.store; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This exception is thrown when there is an attempt to + * access something that has already been closed. + */ +public class AlreadyClosedException extends IllegalStateException { + public AlreadyClosedException(String message) { + super(message); + } +} Property changes on: src/java/org/apache/lucene/store/AlreadyClosedException.java ___________________________________________________________________ Name: svn:eol-style + native