Index: CHANGES.txt
===================================================================
--- CHANGES.txt (revision 788015)
+++ CHANGES.txt (working copy)
@@ -116,6 +116,11 @@
deleted. You can call IndexReader.isDeleted(n) prior to calling document(n).
(Shai Erera via Mike McCandless)
+ 8. LUCENE-1707 - IndexReader and IndexWriter no longer check if they are still
+ open before they perform an operation. If your code relies on this behavior,
+ you should call isOpen() to ensure the reader/writer is still open.
+ (Shai Erera via ?)
+
API Changes
1. LUCENE-1419: Add expert API to set custom indexing chain. This API is
Index: src/java/org/apache/lucene/index/DirectoryOwningReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryOwningReader.java (revision 788015)
+++ src/java/org/apache/lucene/index/DirectoryOwningReader.java (working copy)
@@ -41,7 +41,6 @@
}
public IndexReader reopen() throws CorruptIndexException, IOException {
- ensureOpen();
final IndexReader r = in.reopen();
if (r != in)
return new DirectoryOwningReader(r, ref);
@@ -49,7 +48,6 @@
}
public IndexReader reopen(boolean openReadOnly) throws CorruptIndexException, IOException {
- ensureOpen();
final IndexReader r = in.reopen(openReadOnly);
if (r != in)
return new DirectoryOwningReader(r, ref);
@@ -57,7 +55,6 @@
}
public IndexReader reopen(final IndexCommit commit) throws CorruptIndexException, IOException {
- ensureOpen();
final IndexReader r = in.reopen(commit);
if (r != in)
return new DirectoryOwningReader(r, ref);
@@ -65,12 +62,10 @@
}
public Object clone() {
- ensureOpen();
return new DirectoryOwningReader((IndexReader) in.clone(), ref);
}
public IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
- ensureOpen();
return new DirectoryOwningReader(in.clone(openReadOnly), ref);
}
Index: src/java/org/apache/lucene/index/DirectoryReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryReader.java (revision 788015)
+++ src/java/org/apache/lucene/index/DirectoryReader.java (working copy)
@@ -350,7 +350,6 @@
}
private synchronized IndexReader doReopen(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
- ensureOpen();
assert commit == null || openReadOnly;
@@ -437,32 +436,27 @@
/** Version number when this IndexReader was opened. */
public long getVersion() {
- ensureOpen();
return segmentInfos.getVersion();
}
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
- ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
- ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
- ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
- ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
@@ -472,7 +466,6 @@
* @return true if the index is optimized; false otherwise
*/
public boolean isOptimized() {
- ensureOpen();
return segmentInfos.size() == 1 && !hasDeletions();
}
@@ -494,7 +487,6 @@
// inherit javadoc
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
- ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
@@ -551,7 +543,6 @@
}
public boolean hasNorms(String field) throws IOException {
- ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
@@ -565,7 +556,6 @@
}
public synchronized byte[] norms(String field) throws IOException {
- ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
@@ -581,7 +571,6 @@
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
- ensureOpen();
byte[] bytes = (byte[])normsCache.get(field);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
@@ -604,17 +593,14 @@
}
public TermEnum terms() throws IOException {
- ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
public TermEnum terms(Term term) throws IOException {
- ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
public int docFreq(Term t) throws IOException {
- ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
@@ -622,12 +608,10 @@
}
public TermDocs termDocs() throws IOException {
- ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
public TermPositions termPositions() throws IOException {
- ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
@@ -652,7 +636,6 @@
}
if (segmentInfos != null) {
- ensureOpen();
if (stale)
throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
@@ -773,7 +756,6 @@
}
public Map getCommitUserData() {
- ensureOpen();
return segmentInfos.getUserData();
}
@@ -788,7 +770,6 @@
* @throws IOException if there is a low-level IO error
*/
public boolean isCurrent() throws CorruptIndexException, IOException {
- ensureOpen();
return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
}
@@ -807,7 +788,6 @@
}
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
- ensureOpen();
return getFieldNames(fieldNames, this.subReaders);
}
Index: src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 788015)
+++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy)
@@ -111,25 +111,20 @@
public TermFreqVector[] getTermFreqVectors(int docNumber)
throws IOException {
- ensureOpen();
return in.getTermFreqVectors(docNumber);
}
public TermFreqVector getTermFreqVector(int docNumber, String field)
throws IOException {
- ensureOpen();
return in.getTermFreqVector(docNumber, field);
}
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
- ensureOpen();
in.getTermFreqVector(docNumber, field, mapper);
-
}
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
- ensureOpen();
in.getTermFreqVector(docNumber, mapper);
}
@@ -144,7 +139,6 @@
}
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
- ensureOpen();
return in.document(n, fieldSelector);
}
@@ -161,17 +155,14 @@
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
public boolean hasNorms(String field) throws IOException {
- ensureOpen();
return in.hasNorms(field);
}
public byte[] norms(String f) throws IOException {
- ensureOpen();
return in.norms(f);
}
public void norms(String f, byte[] bytes, int offset) throws IOException {
- ensureOpen();
in.norms(f, bytes, offset);
}
@@ -180,32 +171,26 @@
}
public TermEnum terms() throws IOException {
- ensureOpen();
return in.terms();
}
public TermEnum terms(Term t) throws IOException {
- ensureOpen();
return in.terms(t);
}
public int docFreq(Term t) throws IOException {
- ensureOpen();
return in.docFreq(t);
}
public TermDocs termDocs() throws IOException {
- ensureOpen();
return in.termDocs();
}
public TermDocs termDocs(Term term) throws IOException {
- ensureOpen();
return in.termDocs(term);
}
public TermPositions termPositions() throws IOException {
- ensureOpen();
return in.termPositions();
}
@@ -220,22 +205,18 @@
public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
- ensureOpen();
return in.getFieldNames(fieldNames);
}
public long getVersion() {
- ensureOpen();
return in.getVersion();
}
public boolean isCurrent() throws CorruptIndexException, IOException {
- ensureOpen();
return in.isCurrent();
}
public boolean isOptimized() {
- ensureOpen();
return in.isOptimized();
}
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java (revision 788015)
+++ src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -138,7 +138,6 @@
*/
public synchronized void incRef() {
assert refCount > 0;
- ensureOpen();
refCount++;
}
@@ -154,7 +153,6 @@
*/
public synchronized void decRef() throws IOException {
assert refCount > 0;
- ensureOpen();
if (refCount == 1) {
commit();
doClose();
@@ -190,15 +188,31 @@
protected IndexReader() {
refCount = 1;
}
-
+
/**
- * @throws AlreadyClosedException if this IndexReader is closed
+ * @throws AlreadyClosedException
+ * if this IndexReader is closed
+   * @deprecated will be removed in 3.0. If you want to ensure an operation does
+   *             not occur on a closed reader, call {@link #isOpen()} first.
*/
protected final void ensureOpen() throws AlreadyClosedException {
if (refCount <= 0) {
throw new AlreadyClosedException("this IndexReader is closed");
}
}
+
+ /**
+   * Returns true iff this {@link IndexReader} instance is still open, i.e. its
+   * reference count is greater than zero.
+   * NOTE: since 2.9, the code does not check if the instance is open before
+   * doing any operation. Usually this is not needed as the application knows if
+   * the reader is open or closed. However, if your code does not know if a given
+   * reader is open/closed and relies on the previously thrown
+   * {@link AlreadyClosedException}, then you can use this method.
+ */
+ public final synchronized boolean isOpen() {
+ return refCount > 0;
+ }
/** Returns a read/write IndexReader reading the index in an FSDirectory in the named
* path.
@@ -509,7 +523,6 @@
* @throws UnsupportedOperationException if no directory
*/
public Directory directory() {
- ensureOpen();
if (null != directory) {
return directory;
} else {
@@ -833,7 +846,6 @@
* @throws IOException if there is a low-level IO error
*/
public Document document(int n) throws CorruptIndexException, IOException {
- ensureOpen();
return document(n, null);
}
@@ -880,7 +892,6 @@
public boolean hasNorms(String field) throws IOException {
// backward compatible implementation.
// SegmentReader has an efficient implementation.
- ensureOpen();
return norms(field) != null;
}
@@ -920,7 +931,6 @@
*/
public synchronized void setNorm(int doc, String field, byte value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
- ensureOpen();
acquireWriteLock();
hasChanges = true;
doSetNorm(doc, field, value);
@@ -946,7 +956,6 @@
*/
public void setNorm(int doc, String field, float value)
throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
- ensureOpen();
setNorm(doc, field, Similarity.encodeNorm(value));
}
@@ -988,7 +997,6 @@
* @throws IOException if there is a low-level IO error
*/
public TermDocs termDocs(Term term) throws IOException {
- ensureOpen();
TermDocs termDocs = termDocs();
termDocs.seek(term);
return termDocs;
@@ -1017,7 +1025,6 @@
* @throws IOException if there is a low-level IO error
*/
public TermPositions termPositions(Term term) throws IOException {
- ensureOpen();
TermPositions termPositions = termPositions();
termPositions.seek(term);
return termPositions;
@@ -1046,7 +1053,6 @@
* @throws IOException if there is a low-level IO error
*/
public synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
- ensureOpen();
acquireWriteLock();
hasChanges = true;
doDelete(docNum);
@@ -1077,7 +1083,6 @@
* @throws IOException if there is a low-level IO error
*/
public int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
- ensureOpen();
TermDocs docs = termDocs(term);
if (docs == null) return 0;
int n = 0;
@@ -1103,7 +1108,6 @@
* @throws IOException if there is a low-level IO error
*/
public synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
- ensureOpen();
acquireWriteLock();
hasChanges = true;
doUndeleteAll();
@@ -1123,7 +1127,6 @@
* @throws IOException
*/
public final synchronized void flush() throws IOException {
- ensureOpen();
commit();
}
@@ -1135,7 +1138,6 @@
* @throws IOException
*/
public final synchronized void flush(Map commitUserData) throws IOException {
- ensureOpen();
commit(commitUserData);
}
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 788015)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -687,6 +687,18 @@
}
/**
+   * Returns true iff neither {@link #close()} nor {@link #close(boolean)} has
+   * been called yet.
+   * NOTE: since 2.9, many methods do not check if the writer has already
+   * been closed before they execute. Therefore if your code relied on that
+   * behavior, you should check if the writer is still open before you attempt
+   * to do any operation on it.
+ */
+ public synchronized final boolean isOpen() {
+ return isOpen(true);
+ }
+
+ /**
* Used internally to throw an {@link
* AlreadyClosedException} if this IndexWriter has been
* closed.
@@ -769,7 +781,6 @@
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
- ensureOpen();
this.similarity = similarity;
docWriter.setSimilarity(similarity);
}
@@ -779,7 +790,6 @@
*
This defaults to the current value of {@link Similarity#getDefault()}. */ public Similarity getSimilarity() { - ensureOpen(); return this.similarity; } @@ -805,7 +815,6 @@ * @see #DEFAULT_TERM_INDEX_INTERVAL */ public void setTermIndexInterval(int interval) { - ensureOpen(); this.termIndexInterval = interval; } @@ -1545,7 +1554,6 @@ * Expert: set the merge policy used by this writer. */ public void setMergePolicy(MergePolicy mp) { - ensureOpen(); if (mp == null) throw new NullPointerException("MergePolicy must be non-null"); @@ -1562,7 +1570,6 @@ * @see #setMergePolicy */ public MergePolicy getMergePolicy() { - ensureOpen(); return mergePolicy; } @@ -1570,7 +1577,6 @@ * Expert: set the merge scheduler used by this writer. */ synchronized public void setMergeScheduler(MergeScheduler mergeScheduler) throws CorruptIndexException, IOException { - ensureOpen(); if (mergeScheduler == null) throw new NullPointerException("MergeScheduler must be non-null"); @@ -1589,7 +1595,6 @@ * @see #setMergePolicy */ public MergeScheduler getMergeScheduler() { - ensureOpen(); return mergeScheduler; } @@ -1647,7 +1652,6 @@ * will be indexed for a field. 
*/ public void setMaxFieldLength(int maxFieldLength) { - ensureOpen(); this.maxFieldLength = maxFieldLength; docWriter.setMaxFieldLength(maxFieldLength); if (infoStream != null) @@ -1660,7 +1664,6 @@ * @see #setMaxFieldLength */ public int getMaxFieldLength() { - ensureOpen(); return maxFieldLength; } @@ -1684,7 +1687,6 @@ * @see #setRAMBufferSizeMB */ public void setMaxBufferedDocs(int maxBufferedDocs) { - ensureOpen(); if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2) throw new IllegalArgumentException( "maxBufferedDocs must at least be 2 when enabled"); @@ -1724,7 +1726,6 @@ * @see #setMaxBufferedDocs */ public int getMaxBufferedDocs() { - ensureOpen(); return docWriter.getMaxBufferedDocs(); } @@ -1779,7 +1780,6 @@ * @see #setRAMBufferSizeMB */ public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) { - ensureOpen(); if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH && maxBufferedDeleteTerms < 1) throw new IllegalArgumentException( @@ -1795,7 +1795,6 @@ * @see #setMaxBufferedDeleteTerms */ public int getMaxBufferedDeleteTerms() { - ensureOpen(); return docWriter.getMaxBufferedDeleteTerms(); } @@ -1881,7 +1880,6 @@ * to this. */ public void setInfoStream(PrintStream infoStream) { - ensureOpen(); setMessageID(infoStream); docWriter.setInfoStream(infoStream); deleter.setInfoStream(infoStream); @@ -1906,7 +1904,6 @@ * @see #setInfoStream */ public PrintStream getInfoStream() { - ensureOpen(); return infoStream; } @@ -1920,7 +1917,6 @@ * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter. */ public void setWriteLockTimeout(long writeLockTimeout) { - ensureOpen(); this.writeLockTimeout = writeLockTimeout; } @@ -1929,7 +1925,6 @@ * @see #setWriteLockTimeout */ public long getWriteLockTimeout() { - ensureOpen(); return writeLockTimeout; } @@ -2201,7 +2196,6 @@ /** Returns the analyzer used by this index. 
*/ public Analyzer getAnalyzer() { - ensureOpen(); return analyzer; } @@ -2211,7 +2205,6 @@ * method) or {@link #numDocs()} (also takes deletions * into account), instead. */ public synchronized int docCount() { - ensureOpen(); return maxDoc(); } @@ -2252,7 +2245,6 @@ } public synchronized boolean hasDeletions() throws IOException { - ensureOpen(); if (docWriter.hasDeletes()) return true; for (int i = 0; i < segmentInfos.size(); i++) @@ -2343,7 +2335,6 @@ * @throws IOException if there is a low-level IO error */ public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException { - ensureOpen(); boolean doFlush = false; boolean success = false; try { @@ -2386,7 +2377,6 @@ * @throws IOException if there is a low-level IO error */ public void deleteDocuments(Term term) throws CorruptIndexException, IOException { - ensureOpen(); try { boolean doFlush = docWriter.bufferDeleteTerm(term); if (doFlush) @@ -2410,7 +2400,6 @@ * @throws IOException if there is a low-level IO error */ public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException { - ensureOpen(); try { boolean doFlush = docWriter.bufferDeleteTerms(terms); if (doFlush) @@ -2432,7 +2421,6 @@ * @throws IOException if there is a low-level IO error */ public void deleteDocuments(Query query) throws CorruptIndexException, IOException { - ensureOpen(); boolean doFlush = docWriter.bufferDeleteQuery(query); if (doFlush) flush(true, false, false); @@ -2452,7 +2440,6 @@ * @throws IOException if there is a low-level IO error */ public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException { - ensureOpen(); boolean doFlush = docWriter.bufferDeleteQueries(queries); if (doFlush) flush(true, false, false); @@ -2476,7 +2463,6 @@ * @throws IOException if there is a low-level IO error */ public void updateDocument(Term term, Document doc) throws CorruptIndexException, IOException { - ensureOpen(); updateDocument(term, doc, getAnalyzer()); } @@ 
-2500,7 +2486,6 @@ */ public void updateDocument(Term term, Document doc, Analyzer analyzer) throws CorruptIndexException, IOException { - ensureOpen(); try { boolean doFlush = false; boolean success = false; @@ -2679,8 +2664,6 @@ * href="#OOME">above for details.
*/ public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException { - ensureOpen(); - if (maxNumSegments < 1) throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments); @@ -2786,7 +2769,6 @@ */ public void expungeDeletes(boolean doWait) throws CorruptIndexException, IOException { - ensureOpen(); if (infoStream != null) message("expungeDeletes: index now " + segString()); @@ -3160,7 +3142,6 @@ * @throws IOException if there is a low-level IO error */ public void rollback() throws IOException { - ensureOpen(); if (autoCommit) throw new IllegalStateException("rollback() can only be called when IndexWriter was opened with autoCommit=false"); @@ -3362,11 +3343,9 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public void addIndexes(Directory[] dirs) + public void addIndexes(Directory[] dirs) throws CorruptIndexException, IOException { - ensureOpen(); - noDupDirs(dirs); // Do not allow add docs or deletes while we are running: @@ -3490,8 +3469,6 @@ public void addIndexesNoOptimize(Directory[] dirs) throws CorruptIndexException, IOException { - ensureOpen(); - noDupDirs(dirs); // Do not allow add docs or deletes while we are running: @@ -3665,8 +3642,6 @@ public void addIndexes(IndexReader[] readers) throws CorruptIndexException, IOException { - ensureOpen(); - // Do not allow add docs or deletes while we are running: docWriter.pauseAllThreads(); @@ -3845,7 +3820,6 @@ * * @see #prepareCommit(Map) */ public final void prepareCommit() throws CorruptIndexException, IOException { - ensureOpen(); prepareCommit(null); } @@ -3956,8 +3930,6 @@ */ public final void commit(Map commitUserData) throws CorruptIndexException, IOException { - ensureOpen(); - if (infoStream != null) message("commit: start"); @@ -4202,14 +4174,12 @@ * Useful for size management with flushRamDocs() */ public final long ramSizeInBytes() { - ensureOpen(); return 
docWriter.getRAMUsed(); } /** Expert: Return the number of documents currently * buffered in RAM. */ public final synchronized int numRamDocs() { - ensureOpen(); return docWriter.getNumDocsInRAM(); } Index: src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- src/java/org/apache/lucene/index/MultiReader.java (revision 788015) +++ src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -138,8 +138,6 @@ * @throws IOException */ protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException { - ensureOpen(); - boolean reopened = false; IndexReader[] newSubReaders = new IndexReader[subReaders.length]; @@ -189,27 +187,23 @@ } public TermFreqVector[] getTermFreqVectors(int n) throws IOException { - ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment } public TermFreqVector getTermFreqVector(int n, String field) throws IOException { - ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVector(n - starts[i], field); } public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { - ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper); } public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { - ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } @@ -236,7 +230,6 @@ // inherit javadoc public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { - ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader } @@ -272,7 +265,6 @@ } public boolean hasNorms(String field) throws 
IOException { - ensureOpen(); for (int i = 0; i < subReaders.length; i++) { if (subReaders[i].hasNorms(field)) return true; } @@ -286,7 +278,6 @@ } public synchronized byte[] norms(String field) throws IOException { - ensureOpen(); byte[] bytes = (byte[])normsCache.get(field); if (bytes != null) return bytes; // cache hit @@ -302,7 +293,6 @@ public synchronized void norms(String field, byte[] result, int offset) throws IOException { - ensureOpen(); byte[] bytes = (byte[])normsCache.get(field); for (int i = 0; i < subReaders.length; i++) // read from segments subReaders[i].norms(field, result, offset + starts[i]); @@ -328,17 +318,14 @@ } public TermEnum terms() throws IOException { - ensureOpen(); return new MultiTermEnum(this, subReaders, starts, null); } public TermEnum terms(Term term) throws IOException { - ensureOpen(); return new MultiTermEnum(this, subReaders, starts, term); } public int docFreq(Term t) throws IOException { - ensureOpen(); int total = 0; // sum freqs in segments for (int i = 0; i < subReaders.length; i++) total += subReaders[i].docFreq(t); @@ -346,12 +333,10 @@ } public TermDocs termDocs() throws IOException { - ensureOpen(); return new MultiTermDocs(this, subReaders, starts); } public TermPositions termPositions() throws IOException { - ensureOpen(); return new MultiTermPositions(this, subReaders, starts); } @@ -376,7 +361,6 @@ } public Collection getFieldNames (IndexReader.FieldOption fieldNames) { - ensureOpen(); return DirectoryReader.getFieldNames(fieldNames, this.subReaders); } Index: src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- src/java/org/apache/lucene/index/ParallelReader.java (revision 788015) +++ src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -73,7 +73,6 @@ * @throws IOException if there is a low-level IO error */ public void add(IndexReader reader) throws IOException { - ensureOpen(); add(reader, false); } @@ -90,7 +89,6 @@ 
public void add(IndexReader reader, boolean ignoreStoredFields) throws IOException { - ensureOpen(); if (readers.size() == 0) { this.maxDoc = reader.maxDoc(); this.numDocs = reader.numDocs(); @@ -155,7 +153,6 @@ } protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException { - ensureOpen(); boolean reopened = false; List newReaders = new ArrayList(); @@ -261,7 +258,6 @@ // append fields from storedFieldReaders public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { - ensureOpen(); Document result = new Document(); for (int i = 0; i < storedFieldReaders.size(); i++) { IndexReader reader = (IndexReader)storedFieldReaders.get(i); @@ -287,7 +283,6 @@ // get all vectors public TermFreqVector[] getTermFreqVectors(int n) throws IOException { - ensureOpen(); ArrayList results = new ArrayList(); Iterator i = fieldToReader.entrySet().iterator(); while (i.hasNext()) { @@ -304,14 +299,12 @@ public TermFreqVector getTermFreqVector(int n, String field) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); return reader==null ? null : reader.getTermFreqVector(n, field); } public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); if (reader != null) { reader.getTermFreqVector(docNumber, field, mapper); @@ -319,8 +312,6 @@ } public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { - ensureOpen(); - Iterator i = fieldToReader.entrySet().iterator(); while (i.hasNext()) { Map.Entry e = (Map.Entry)i.next(); @@ -332,20 +323,17 @@ } public boolean hasNorms(String field) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); return reader==null ? 
false : reader.hasNorms(field); } public byte[] norms(String field) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); return reader==null ? null : reader.norms(field); } public void norms(String field, byte[] result, int offset) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(field)); if (reader!=null) reader.norms(field, result, offset); @@ -359,38 +347,31 @@ } public TermEnum terms() throws IOException { - ensureOpen(); return new ParallelTermEnum(); } public TermEnum terms(Term term) throws IOException { - ensureOpen(); return new ParallelTermEnum(term); } public int docFreq(Term term) throws IOException { - ensureOpen(); IndexReader reader = ((IndexReader)fieldToReader.get(term.field())); return reader==null ? 0 : reader.docFreq(term); } public TermDocs termDocs(Term term) throws IOException { - ensureOpen(); return new ParallelTermDocs(term); } public TermDocs termDocs() throws IOException { - ensureOpen(); return new ParallelTermDocs(); } public TermPositions termPositions(Term term) throws IOException { - ensureOpen(); return new ParallelTermPositions(term); } public TermPositions termPositions() throws IOException { - ensureOpen(); return new ParallelTermPositions(); } @@ -456,7 +437,6 @@ } public Collection getFieldNames (IndexReader.FieldOption fieldNames) { - ensureOpen(); Set fieldSet = new HashSet(); for (int i = 0; i < readers.size(); i++) { IndexReader reader = ((IndexReader)readers.get(i)); @@ -609,7 +589,6 @@ return ((TermPositions)termDocs).getPayload(data, offset); } - // TODO: Remove warning after API has been finalized public boolean isPayloadAvailable() { return ((TermPositions) termDocs).isPayloadAvailable(); @@ -617,8 +596,3 @@ } } - - - - - Index: src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- src/java/org/apache/lucene/index/SegmentReader.java (revision 788015) +++ 
src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -836,12 +836,10 @@ } public TermEnum terms() { - ensureOpen(); return tis.terms(); } public TermEnum terms(Term t) throws IOException { - ensureOpen(); return tis.terms(t); } @@ -850,7 +848,6 @@ } public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { - ensureOpen(); return getFieldsReader().doc(n, fieldSelector); } @@ -867,17 +864,14 @@ } public TermDocs termDocs() throws IOException { - ensureOpen(); return new SegmentTermDocs(this); } public TermPositions termPositions() throws IOException { - ensureOpen(); return new SegmentTermPositions(this); } public int docFreq(Term t) throws IOException { - ensureOpen(); TermInfo ti = tis.get(t); if (ti != null) return ti.docFreq; @@ -910,8 +904,6 @@ * @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption) */ public Collection getFieldNames(IndexReader.FieldOption fieldOption) { - ensureOpen(); - Set fieldSet = new HashSet(); for (int i = 0; i < fieldInfos.size(); i++) { FieldInfo fi = fieldInfos.fieldInfo(i); @@ -958,7 +950,6 @@ public synchronized boolean hasNorms(String field) { - ensureOpen(); return norms.containsKey(field); } @@ -984,7 +975,6 @@ // returns fake norms if norms aren't available public synchronized byte[] norms(String field) throws IOException { - ensureOpen(); byte[] bytes = getNorms(field); if (bytes==null && !getDisableFakeNorms()) bytes=fakeNorms(); return bytes; @@ -1004,7 +994,6 @@ public synchronized void norms(String field, byte[] bytes, int offset) throws IOException { - ensureOpen(); Norm norm = (Norm) norms.get(field); if (norm == null) { Arrays.fill(bytes, offset, bytes.length, DefaultSimilarity.encodeNorm(1.0f)); @@ -1107,7 +1096,6 @@ */ public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException { // Check if this field is invalid or has no stored term vector - ensureOpen(); FieldInfo fi = fieldInfos.fieldInfo(field); if (fi == null 
|| !fi.storeTermVector || termVectorsReaderOrig == null) return null; @@ -1121,7 +1109,6 @@ public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { - ensureOpen(); FieldInfo fi = fieldInfos.fieldInfo(field); if (fi == null || !fi.storeTermVector || termVectorsReaderOrig == null) return; @@ -1138,7 +1125,6 @@ public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { - ensureOpen(); if (termVectorsReaderOrig == null) return; @@ -1157,7 +1143,6 @@ * @throws IOException */ public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException { - ensureOpen(); if (termVectorsReaderOrig == null) return null; Index: src/test/org/apache/lucene/index/TestIndexReader.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexReader.java (revision 788015) +++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy) @@ -438,52 +438,6 @@ reader.close(); } - // Make sure attempts to make changes after reader is - // closed throws IOException: - public void testChangesAfterClose() throws IOException - { - Directory dir = new RAMDirectory(); - - IndexWriter writer = null; - IndexReader reader = null; - Term searchTerm = new Term("content", "aaa"); - - // add 11 documents with term : aaa - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - for (int i = 0; i < 11; i++) - { - addDoc(writer, searchTerm.text()); - } - writer.close(); - - reader = IndexReader.open(dir); - - // Close reader: - reader.close(); - - // Then, try to make changes: - try { - reader.deleteDocument(4); - fail("deleteDocument after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - - try { - reader.setNorm(5, "aaa", 2.0f); - fail("setNorm after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - - try { - 
reader.undeleteAll(); - fail("undeleteAll after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - } - // Make sure we get lock obtain failed exception with 2 writers: public void testLockObtainFailed() throws IOException { Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 788015) +++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -1151,12 +1151,6 @@ IndexReader r3 = r1.reopen(); assertTrue(r1 != r3); r1.close(); - try { - r1.document(2); - fail("did not hit exception"); - } catch (AlreadyClosedException ace) { - // expected - } r3.close(); dir.close(); } Index: src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 788015) +++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -936,25 +936,6 @@ } } - public void testChangesAfterClose() throws IOException { - Directory dir = new RAMDirectory(); - - IndexWriter writer = null; - - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - addDoc(writer); - - // close - writer.close(); - try { - addDoc(writer); - fail("did not hit AlreadyClosedException"); - } catch (AlreadyClosedException e) { - // expected - } - } - - // Simulate a corrupt index by removing one of the cfs // files and make sure we get an IOException trying to // open the index: Index: tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexReader.java =================================================================== --- tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexReader.java (revision 787995) +++ 
tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexReader.java (working copy) @@ -384,52 +384,6 @@ reader.close(); } - // Make sure attempts to make changes after reader is - // closed throws IOException: - public void testChangesAfterClose() throws IOException - { - Directory dir = new RAMDirectory(); - - IndexWriter writer = null; - IndexReader reader = null; - Term searchTerm = new Term("content", "aaa"); - - // add 11 documents with term : aaa - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - for (int i = 0; i < 11; i++) - { - addDoc(writer, searchTerm.text()); - } - writer.close(); - - reader = IndexReader.open(dir); - - // Close reader: - reader.close(); - - // Then, try to make changes: - try { - reader.deleteDocument(4); - fail("deleteDocument after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - - try { - reader.setNorm(5, "aaa", 2.0f); - fail("setNorm after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - - try { - reader.undeleteAll(); - fail("undeleteAll after close failed to throw IOException"); - } catch (AlreadyClosedException e) { - // expected - } - } - // Make sure we get lock obtain failed exception with 2 writers: public void testLockObtainFailed() throws IOException { Index: tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 787995) +++ tags/lucene_2_4_back_compat_tests_20090623a/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -948,25 +948,6 @@ } } - public void testChangesAfterClose() throws IOException { - Directory dir = new RAMDirectory(); - - IndexWriter writer = null; - - writer = new IndexWriter(dir, new 
WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - addDoc(writer); - - // close - writer.close(); - try { - addDoc(writer); - fail("did not hit AlreadyClosedException"); - } catch (AlreadyClosedException e) { - // expected - } - } - - // Simulate a corrupt index by removing one of the cfs // files and make sure we get an IOException trying to // open the index: