Index: src/test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
@@ -0,0 +1,159 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.lucene.index.SegmentReader.Norm;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests cloning multiple types of readers, modifying the deletedDocs and norms
+ * and verifying that copy-on-write semantics of the deletedDocs and norms are implemented properly
+ */
+public class TestIndexReaderClone extends LuceneTestCase {
+  
+  public void testParallelReader() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, true);
+    final Directory dir2 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir2, true);
+    IndexReader r1 = IndexReader.open(dir1);
+    IndexReader r2 = IndexReader.open(dir2);
+    
+    ParallelReader pr1 = new ParallelReader();
+    pr1.add(r1);
+    pr1.add(r2);
+    
+    performDefaultTests(pr1);
+  }
+  
+  /**
+   * 1. Get a norm from the original reader
+   * 2. Clone the original reader
+   * 3. Delete a document and set the norm of the cloned reader
+   * 4. Verify the norms are not the same on each reader
+   * 5. Verify the doc deleted is only in the cloned reader
+   * 6. Try to delete a document in the original reader, an exception should be thrown
+   * @param r1 IndexReader to perform tests on
+   * @throws Exception
+   */
+  private void performDefaultTests(IndexReader r1) throws Exception {
+    float norm1 = Similarity.decodeNorm(r1.norms("field1")[4]);
+    
+    IndexReader pr1Clone = (IndexReader)r1.clone();
+    pr1Clone.deleteDocument(10);
+    pr1Clone.setNorm(4, "field1", 0.5f);
+    assertTrue(Similarity.decodeNorm(r1.norms("field1")[4]) == norm1);
+    assertTrue(Similarity.decodeNorm(pr1Clone.norms("field1")[4]) != norm1);
+    
+    assertTrue(!r1.isDeleted(10));
+    assertTrue(pr1Clone.isDeleted(10));
+    
+    boolean exceptionThrown = false;
+    // try to update the original reader, which should throw an exception
+    try {
+      r1.deleteDocument(11);
+    } catch (Exception exception) {
+      exceptionThrown = true;
+    }
+    assertEquals("Tried to delete doc 11 and an exception should have been thrown", exceptionThrown, true);
+  }
+  
+  public void testMixedReaders() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, true);
+    final Directory dir2 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir2, true);
+    IndexReader r1 = IndexReader.open(dir1);
+    IndexReader r2 = IndexReader.open(dir2);
+    
+    MultiReader multiReader = new MultiReader(new IndexReader[] {r1, r2});
+    performDefaultTests(multiReader);
+  }
+  
+  public void testSegmentReaderUndeleteall() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader origSegmentReader = (SegmentReader)IndexReader.open(dir1);
+    origSegmentReader.deleteDocument(10);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    origSegmentReader.undeleteAll();
+    assertNull(origSegmentReader.deletedDocsRef);
+    // need to test norms?
+  }
+  
+  public void testSegmentReaderCloseReferencing() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader origSegmentReader = (SegmentReader)IndexReader.open(dir1);
+    origSegmentReader.deleteDocument(1);
+    origSegmentReader.setNorm(4, "field1", 0.5f);
+    
+    SegmentReader clonedSegmentReader = (SegmentReader)origSegmentReader.clone();
+    assertDelDocsRefCountEquals(2, origSegmentReader);
+    origSegmentReader.close();
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    // check the norm refs
+    Norm norm = (Norm)origSegmentReader.norms.get("field1");
+    assertEquals(1, norm.bytesRef.refCount());
+  }
+  
+  public void testSegmentReaderDelDocsReferenceCounting() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader origSegmentReader = (SegmentReader)IndexReader.open(dir1);
+    // deletedDocsRef should be null because nothing has updated yet
+    assertNull(origSegmentReader.deletedDocsRef);
+    
+    // we deleted a document, so there is now a deletedDocs bitvector and a reference to it
+    origSegmentReader.deleteDocument(1);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    
+    // the cloned segmentreader should have 2 references, 1 to itself, and 1 to the original segmentreader
+    SegmentReader clonedSegmentReader = (SegmentReader)origSegmentReader.clone();
+    assertDelDocsRefCountEquals(2, origSegmentReader);
+    // deleting a document creates a new deletedDocs bitvector, so the ref count goes to 1
+    clonedSegmentReader.deleteDocument(2);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+    
+    // make sure the deletedocs objects are different
+    assertTrue(origSegmentReader.deletedDocs != clonedSegmentReader.deletedDocs);
+    
+    assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
+    assertTrue(!origSegmentReader.isDeleted(2)); // doc 2 should not be deleted in original segmentreader
+    assertTrue(clonedSegmentReader.isDeleted(2)); // doc 2 should be deleted in cloned segmentreader
+    
+    boolean deleteDocumentException = false;
+    // deleting a doc from the original segmentreader should throw an exception
+    try {
+      origSegmentReader.deleteDocument(4);
+    } catch (LockObtainFailedException lbfe) {
+      deleteDocumentException = true;
+    }
+    assertTrue(deleteDocumentException);
+    
+    origSegmentReader.close();
+    // try closing the original segment reader to see if it affects the clonedSegmentReader
+    clonedSegmentReader.deleteDocument(3);
+    clonedSegmentReader.flush();
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+    
+    // test a reopened reader
+    SegmentReader reopenedSegmentReader = (SegmentReader)clonedSegmentReader.reopen();
+    SegmentReader cloneSegmentReader2 = (SegmentReader)reopenedSegmentReader.clone();
+    assertDelDocsRefCountEquals(2, cloneSegmentReader2); 
+  }
+  
+  private void assertDocDeleted(SegmentReader reader, SegmentReader reader2, int doc) {
+    assertEquals(reader.isDeleted(doc), reader2.isDeleted(doc));
+  }
+  
+  private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {      
+    assertEquals(refCount, reader.deletedDocsRef.refCount());
+  }
+}

Property changes on: src/test/org/apache/lucene/index/TestIndexReaderClone.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 0)
@@ -0,0 +1,302 @@
+package org.apache.lucene.index;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.SegmentReader.Norm;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * Tests cloning IndexReader norms
+ */
+public class TestIndexReaderCloneNorms extends LuceneTestCase {
+
+  private class SimilarityOne extends DefaultSimilarity {
+    public float lengthNorm(String fieldName, int numTerms) {
+      return 1;
+    }
+  }
+
+  private static final int NUM_FIELDS = 10;
+
+  private Similarity similarityOne;
+
+  private Analyzer anlzr;
+
+  private int numDocNorms;
+
+  private ArrayList norms;
+
+  private ArrayList modifiedNorms;
+
+  private float lastNorm = 0;
+
+  private float normDelta = (float) 0.001;
+
+  public TestIndexReaderCloneNorms(String s) {
+    super(s);
+  }
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    similarityOne = new SimilarityOne();
+    anlzr = new StandardAnalyzer();
+  }
+  
+  /**
+   * Test that norms values are preserved as the index is maintained. Including
+   * separate norms. Including merging indexes with separate norms. Including
+   * optimize.
+   */
+  public void testNorms() throws IOException {
+    // tmp dir
+    String tempDir = System.getProperty("java.io.tmpdir");
+    if (tempDir == null) {
+      throw new IOException("java.io.tmpdir undefined, cannot run test");
+    }
+
+    // test with a single index: index1
+    File indexDir1 = new File(tempDir, "lucenetestindex1");
+    Directory dir1 = FSDirectory.getDirectory(indexDir1);
+    IndexWriter.unlock(dir1);
+
+    norms = new ArrayList();
+    modifiedNorms = new ArrayList();
+
+    createIndex(dir1);
+    doTestNorms(dir1);
+
+    // test with a single index: index2
+    ArrayList norms1 = norms;
+    ArrayList modifiedNorms1 = modifiedNorms;
+    int numDocNorms1 = numDocNorms;
+
+    norms = new ArrayList();
+    modifiedNorms = new ArrayList();
+    numDocNorms = 0;
+
+    File indexDir2 = new File(tempDir, "lucenetestindex2");
+    Directory dir2 = FSDirectory.getDirectory(indexDir2);
+
+    createIndex(dir2);
+    doTestNorms(dir2);
+
+    // add index1 and index2 to a third index: index3
+    File indexDir3 = new File(tempDir, "lucenetestindex3");
+    Directory dir3 = FSDirectory.getDirectory(indexDir3);
+
+    createIndex(dir3);
+    IndexWriter iw = new IndexWriter(dir3, anlzr, false,
+        IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.addIndexes(new Directory[] { dir1, dir2 });
+    iw.close();
+
+    norms1.addAll(norms);
+    norms = norms1;
+    modifiedNorms1.addAll(modifiedNorms);
+    modifiedNorms = modifiedNorms1;
+    numDocNorms += numDocNorms1;
+
+    // test with index3
+    verifyIndex(dir3);
+    doTestNorms(dir3);
+
+    // now with optimize
+    iw = new IndexWriter(dir3, anlzr, false, IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.optimize();
+    iw.close();
+    verifyIndex(dir3);
+
+    dir1.close();
+    dir2.close();
+    dir3.close();
+  }
+
+  // try cloning and reopening the norms
+  private void doTestNorms(Directory dir) throws IOException {
+    addDocs(dir, 12, true);
+    IndexReader ir = IndexReader.open(dir);
+    verifyIndex(ir);
+    modifyNormsForF1(ir);
+    IndexReader irc = (IndexReader) ir.clone();// IndexReader.open(dir);//ir.clone();
+    verifyIndex(irc);
+
+    modifyNormsForF1(irc);
+
+    IndexReader irc3 = (IndexReader) irc.clone();
+    verifyIndex(irc3);
+    modifyNormsForF1(irc3);
+    verifyIndex(irc3);
+    irc3.flush();
+    irc3.close();
+  }
+  
+  public void testNormsClose() throws IOException { 
+    Directory dir1 = new RAMDirectory(); 
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader reader1 = (SegmentReader) IndexReader.open(dir1);
+    SegmentReader reader2 = (SegmentReader)reader1.clone();
+    reader1.close();
+    reader2.norms("field1");
+  }
+  
+  public void testNormsRefCounting() throws IOException { 
+    Directory dir1 = new RAMDirectory(); 
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader reader1 = (SegmentReader) IndexReader.open(dir1);
+        
+    SegmentReader reader2C = (SegmentReader)reader1.clone();
+    reader2C.norms("field1"); // load the norms for the field
+    Norm reader2CNorm = (Norm)reader2C.norms.get("field1");
+    reader2C.norms("field1");
+    assertTrue(reader2CNorm.bytesRef != null && reader2CNorm.bytesRef.refCount() == 1);
+    
+    // generate a cannot update exception in reader1
+    boolean exceptionThrown = false;
+    try {
+      reader1.setNorm(1, "field1", 0.99f);
+    } catch (Exception ex) {
+      exceptionThrown = true;
+    }
+    assertTrue(exceptionThrown);
+    
+    SegmentReader reader3C = (SegmentReader)reader2C.clone();
+    Norm reader3CCNorm = (Norm)reader3C.norms.get("field1");
+    assertEquals(2, reader3CCNorm.bytesRef.refCount());
+    
+    // edit a norm and the refcount should be 1
+    SegmentReader reader4C = (SegmentReader)reader3C.clone();
+    assertEquals(3, reader3CCNorm.bytesRef.refCount());
+    reader4C.setNorm(5, "field1", 0.33f);
+    
+    // norm values should be different 
+    assertTrue(Similarity.decodeNorm(reader3C.norms("field1")[5]) != Similarity.decodeNorm(reader4C.norms("field1")[5]));
+    Norm reader4CCNorm = (Norm)reader4C.norms.get("field1");
+    assertEquals(2, reader3CCNorm.bytesRef.refCount());
+    assertEquals(1, reader4CCNorm.bytesRef.refCount());
+        
+    SegmentReader reader5C = (SegmentReader)reader4C.clone();
+    Norm reader5CCNorm = (Norm)reader5C.norms.get("field1");
+    reader5C.setNorm(5, "field1", 0.7f);
+    assertEquals(1, reader5CCNorm.bytesRef.refCount());    
+  }
+  
+  private void createIndex(Directory dir) throws IOException {
+    IndexWriter iw = new IndexWriter(dir, anlzr, true,
+        IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.setSimilarity(similarityOne);
+    iw.setUseCompoundFile(true);
+    iw.close();
+  }
+
+  private void modifyNormsForF1(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir);
+    modifyNormsForF1(ir);
+  }
+
+  private void modifyNormsForF1(IndexReader ir) throws IOException {
+    int n = ir.maxDoc();
+    // System.out.println("modifyNormsForF1 maxDoc: "+n);
+    for (int i = 0; i < n; i += 3) { // modify for every third doc
+      int k = (i * 3) % modifiedNorms.size();
+      float origNorm = ((Float) modifiedNorms.get(i)).floatValue();
+      float newNorm = ((Float) modifiedNorms.get(k)).floatValue();
+      // System.out.println("Modifying: for "+i+" from "+origNorm+" to
+      // "+newNorm);
+      // System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
+      modifiedNorms.set(i, new Float(newNorm));
+      modifiedNorms.set(k, new Float(origNorm));
+      ir.setNorm(i, "f" + 1, newNorm);
+      ir.setNorm(k, "f" + 1, origNorm);
+      // System.out.println("setNorm i: "+i);
+      // break;
+    }
+    // ir.close();
+  }
+
+  private void verifyIndex(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir);
+    verifyIndex(ir);
+    ir.close();
+  }
+
+  private void verifyIndex(IndexReader ir) throws IOException {
+    for (int i = 0; i < NUM_FIELDS; i++) {
+      String field = "f" + i;
+      byte b[] = ir.norms(field);
+      assertEquals("number of norms mismatches", numDocNorms, b.length);
+      ArrayList storedNorms = (i == 1 ? modifiedNorms : norms);
+      for (int j = 0; j < b.length; j++) {
+        float norm = Similarity.decodeNorm(b[j]);
+        float norm1 = ((Float) storedNorms.get(j)).floatValue();
+        assertEquals("stored norm value of " + field + " for doc " + j + " is "
+            + norm + " - a mismatch!", norm, norm1, 0.000001);
+      }
+    }
+  }
+
+  private void addDocs(Directory dir, int ndocs, boolean compound)
+      throws IOException {
+    IndexWriter iw = new IndexWriter(dir, anlzr, false,
+        IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.setSimilarity(similarityOne);
+    iw.setUseCompoundFile(compound);
+    for (int i = 0; i < ndocs; i++) {
+      iw.addDocument(newDoc());
+    }
+    iw.close();
+  }
+
+  // create the next document
+  private Document newDoc() {
+    Document d = new Document();
+    float boost = nextNorm();
+    for (int i = 0; i < 10; i++) {
+      Field f = new Field("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED);
+      f.setBoost(boost);
+      d.add(f);
+    }
+    return d;
+  }
+
+  // return unique norm values that are unchanged by encoding/decoding
+  private float nextNorm() {
+    float norm = lastNorm + normDelta;
+    do {
+      float norm1 = Similarity.decodeNorm(Similarity.encodeNorm(norm));
+      if (norm1 > lastNorm) {
+        // System.out.println(norm1+" > "+lastNorm);
+        norm = norm1;
+        break;
+      }
+      norm += normDelta;
+    } while (true);
+    norms.add(numDocNorms, new Float(norm));
+    modifiedNorms.add(numDocNorms, new Float(norm));
+    // System.out.println("creating norm("+numDocNorms+"): "+norm);
+    numDocNorms++;
+    lastNorm = (norm > 10 ? 0 : norm); // there's a limit to how many distinct
+                                        // values can be stored in a single byte
+    return norm;
+  }
+}

Property changes on: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(revision 729016)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(working copy)
@@ -852,7 +852,8 @@
     }
   }
   
-  private static void createIndex(Directory dir, boolean multiSegment) throws IOException {
+  public static void createIndex(Directory dir, boolean multiSegment) throws IOException {
+    IndexWriter.unlock(dir);
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     
     w.setMergePolicy(new LogDocMergePolicy());
Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -36,7 +36,7 @@
  * "own" the directory, which means that they try to acquire a write lock
  * whenever index modifications are performed.
  */
-abstract class DirectoryIndexReader extends IndexReader {
+abstract class DirectoryIndexReader extends IndexReader implements Cloneable {
   protected Directory directory;
   protected boolean closeDirectory;
   private IndexDeletionPolicy deletionPolicy;
@@ -138,29 +138,50 @@
 
     return reader;
   }
-
+  
   public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+    return doReopen(false);
+  }
+  
+  public final synchronized Object clone() {
+    try {
+      return doReopen(true);
+    } catch (Exception ex) {
+      throw new RuntimeException("", ex);
+    }
+  }
+  
+  protected final synchronized IndexReader doReopen(final boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
-
-    if (this.hasChanges || this.isCurrent()) {
+    if (!doClone && (this.hasChanges || this.isCurrent())) {
+      // if we're cloning, then this does not apply
       // this has changes, therefore we have the lock and don't need to reopen
       // OR: the index in the directory hasn't changed - nothing to do here
       return this;
     }
-
     final SegmentInfos.FindSegmentsFile finder = new SegmentInfos.FindSegmentsFile(directory) {
 
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
-        SegmentInfos infos = new SegmentInfos();
-        infos.read(directory, segmentFileName);
-
-        DirectoryIndexReader newReader = doReopen(infos);
+        SegmentInfos infos = null;
+        if (doClone) {
+          infos = (SegmentInfos)segmentInfos.clone();
+        } else {
+          infos = new SegmentInfos();
+          infos.read(directory, segmentFileName);
+        }
+        DirectoryIndexReader newReader = doReopen(infos, doClone);
         
         if (DirectoryIndexReader.this != newReader) {
           newReader.init(directory, infos, closeDirectory, readOnly);
           newReader.deletionPolicy = deletionPolicy;
         }
-
+        // copy the writeLock if there is one to the new reader and make the current
+        // reader readOnly and null the writeLock
+        if (doClone) {
+          newReader.writeLock = writeLock;
+          writeLock = null;
+          readOnly = true;
+        }
         return newReader;
       }
     };
@@ -197,7 +218,7 @@
   /**
    * Re-opens the index using the passed-in SegmentInfos 
    */
-  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException;
+  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException;
   
   public void setDeletionPolicy(IndexDeletionPolicy deletionPolicy) {
     this.deletionPolicy = deletionPolicy;
@@ -348,6 +369,9 @@
    * @throws IOException if there is a low-level IO error
    */
   protected void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
+    if (readOnly) {
+      throw new LockObtainFailedException("This IndexReader cannot make any changes to the index (as it is readonly)");
+    }
     if (segmentInfos != null) {
       ensureOpen();
       if (stale)
Index: src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- src/java/org/apache/lucene/index/ParallelReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -122,7 +122,15 @@
     }
     decrefOnClose.add(Boolean.valueOf(incRefReaders));
   }
-
+  
+  public synchronized Object clone() {
+    try {
+      return doReopen(true);
+    } catch (Exception ex) {
+      throw new RuntimeException("", ex);
+    }
+  }
+  
   /**
    * Tries to reopen the subreaders.
    * <br>
@@ -142,7 +150,11 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+    return doReopen(false);
+  }
+    
+  protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -155,7 +167,9 @@
     
       for (int i = 0; i < readers.size(); i++) {
         IndexReader oldReader = (IndexReader) readers.get(i);
-        IndexReader newReader = oldReader.reopen();
+        IndexReader newReader = null;
+        if (doClone) newReader = (IndexReader)oldReader.clone();
+        else newReader = oldReader.reopen();
         newReaders.add(newReader);
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -48,12 +48,13 @@
 
   FieldInfos fieldInfos;
   private FieldsReader fieldsReaderOrig = null;
-
+  CloseableThreadLocal fieldsReaderLocal = new FieldsReaderLocal();
   TermInfosReader tis;
   TermVectorsReader termVectorsReaderOrig = null;
   CloseableThreadLocal termVectorsLocal = new CloseableThreadLocal();
 
   BitVector deletedDocs = null;
+  Ref deletedDocsRef = null;
   private boolean deletedDocsDirty = false;
   private boolean normsDirty = false;
   private boolean undeleteAll = false;
@@ -64,7 +65,6 @@
   private boolean rollbackUndeleteAll = false;
   private int rollbackPendingDeleteCount;
   private boolean readOnly;
-
   IndexInput freqStream;
   IndexInput proxStream;
 
@@ -87,12 +87,38 @@
       return (FieldsReader) fieldsReaderOrig.clone();
     }
   }
-  CloseableThreadLocal fieldsReaderLocal = new FieldsReaderLocal();
   
-  private class Norm {
+  static class Ref {
+    private int refCount = 1;
+    
+    public synchronized int refCount() {
+      return refCount;
+    }
+    
+    public synchronized void incRef() {
+      assert refCount > 0;
+      refCount++;
+    }
+
+    public synchronized void decRef() {
+      assert refCount > 0;
+      refCount--;
+    }
+  }
+  
+  /**
+   * Byte[] referencing is used because a new norm object needs 
+   * to be created for each clone, and the byte array is all 
+   * that is needed for sharing between cloned readers.  The 
+   * current norm referencing is for sharing between readers 
+   * whereas the byte[] referencing is for copy on write which 
+   * is independent of reader references.
+   */
+  class Norm {  
     volatile int refCount;
     boolean useSingleNormStream;
     
+    
     public synchronized void incRef() {
       assert refCount > 0;
       refCount++;
@@ -104,7 +130,7 @@
         close();
       }
       refCount--;
-
+      if (bytesRef != null && bytesRef.refCount() > 1) bytesRef.decRef();
     }
     
     public Norm(IndexInput in, boolean useSingleNormStream, int number, long normSeek)
@@ -115,13 +141,53 @@
       this.normSeek = normSeek;
       this.useSingleNormStream = useSingleNormStream;
     }
-
+    Ref bytesRef;
     private IndexInput in;
     private byte[] bytes;
     private boolean dirty;
     private int number;
     private long normSeek;
     private boolean rollbackDirty;
+    
+    private Norm(boolean useSingleNormStream, byte[] bytes, int number, boolean dirty, long normSeek, boolean rollbackDirty, IndexInput in, Ref bytesRef) {
+      this.useSingleNormStream = useSingleNormStream;
+      this.bytes = bytes;
+      this.number = number;
+      this.dirty = dirty;
+      this.normSeek = normSeek;
+      this.rollbackDirty = rollbackDirty;
+      this.in = in;
+      this.bytesRef = bytesRef;
+      refCount = 1;
+    }
+    
+    private synchronized void cloneBytes() {
+      Ref oldRef = bytesRef;
+      bytes = cloneNormBytes(bytes);
+      bytesRef = new Ref();
+      if (oldRef != null) oldRef.decRef();
+    }
+    
+    public synchronized Object cloneNorm() throws IOException {
+      if (bytesRef == null) bytesRef = new Ref();
+      else bytesRef.incRef();
+      IndexInput newIn = null;
+      long newNormSeek = normSeek;
+      if (in != null && useSingleNormStream) {
+        newIn = (IndexInput)in.clone();
+      } else if (!useSingleNormStream) {
+        FieldInfo fi = fieldInfos.fieldInfo(number);
+        Directory d = null;
+        String fileName = si.getNormFileName(fi.number);
+        if (!si.hasSeparateNorms(fi.number)) {
+          d = si.getUseCompoundFile() ? cfsReader : directory();
+        }
+        newIn = d.openInput(fileName);
+        newNormSeek = 0;
+      }
+      Norm norm = new Norm(useSingleNormStream, bytes, number, dirty, normSeek, rollbackDirty, newIn, bytesRef);
+      return norm;
+    }
 
     private void reWrite(SegmentInfo si) throws IOException {
       // NOTE: norms are re-written in regular directory, not cfs
@@ -177,6 +243,17 @@
       Norm norm = (Norm) it.next();
       norm.decRef();
     }
+    //if (deletedDocsRef != null && deletedDocsRef.refCount() > 1) deletedDocsRef.decRef();
+    
+    if (deletedDocsRef != null) {
+      assert deletedDocs != null;
+      synchronized (deletedDocsRef) {
+        deletedDocsRef.decRef();
+      }
+    } else {
+      assert deletedDocs == null;
+      assert deletedDocsRef == null;
+    }
   }
   
   private synchronized void decRefReaderNotNorms() throws IOException {
@@ -410,6 +487,7 @@
     // NOTE: the bitvector is stored using the regular directory, not cfs
     if (hasDeletions(si)) {
       deletedDocs = new BitVector(directory(), si.getDelFileName());
+      deletedDocsRef = new Ref();
      
       assert si.getDelCount() == deletedDocs.count() : 
         "delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
@@ -423,13 +501,33 @@
       assert si.getDelCount() == 0;
   }
   
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  /**
+   * Clones the norm bytes.  May be overridden by subclasses.  New and experimental.
+   * @param bytes Byte array to clone
+   * @return New byte array containing a copy of the input
+   */
+  protected byte[] cloneNormBytes(byte[] bytes) {
+    byte[] cloneBytes = new byte[bytes.length];
+    System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
+    return cloneBytes;
+  }
+  
+  /**
+   * Clones the deleteDocs BitVector.  May be overridden by subclasses. New and experimental.
+   * @param bv BitVector to clone
+   * @return New BitVector
+   */
+  protected BitVector cloneDeletedDocs(BitVector bv) {
+    return (BitVector)bv.clone();
+  }
+  
+  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     DirectoryIndexReader newReader;
     
     if (infos.size() == 1) {
       SegmentInfo si = infos.info(0);
       if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
-        newReader = reopenSegment(si);
+        newReader = reopenSegment(si, doClone);
       } else { 
         // segment not referenced anymore, reopen not possible
         // or segment format changed
@@ -437,19 +535,18 @@
       }
     } else {
       if (readOnly)
-        return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
+        return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
       else
-        return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false);
+        return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false, doClone);
     }
     
     return newReader;
   }
   
-  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
+  synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException {
     boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) 
                                   && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
     boolean normsUpToDate = true;
-
     
     boolean[] fieldNormsChanged = new boolean[fieldInfos.size()];
     if (normsUpToDate) {
@@ -460,18 +557,21 @@
         }
       }
     }
-
-    if (normsUpToDate && deletionsUpToDate) {
+    // if we're cloning we need to run through the reopenSegment logic
+    if (normsUpToDate && deletionsUpToDate && !doClone) {
       return this;
     }    
-    
 
     // clone reader
     SegmentReader clone;
-    if (readOnly) 
-      clone = new ReadOnlySegmentReader();
-    else
-      clone = new SegmentReader();
+    try {
+      if (readOnly)
+        clone = (SegmentReader)READONLY_IMPL.newInstance();
+      else
+        clone = (SegmentReader)IMPL.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("cannot load SegmentReader class: " + e, e);
+    }
 
     boolean success = false;
     try {
@@ -493,14 +593,38 @@
         clone.fieldsReaderOrig = (FieldsReader) fieldsReaderOrig.clone();
       }      
       
-      if (!deletionsUpToDate) {
-        // load deleted docs
-        clone.deletedDocs = null;
-        clone.loadDeletedDocs();
+      // we have to open a new FieldsReader, because it is not thread-safe
+      // and can thus not be shared among multiple SegmentReaders
+      // TODO: Change this in case FieldsReader becomes thread-safe in the future
+      final String fieldsSegment;
+  
+      Directory storeDir = directory();
+      
+      if (si.getDocStoreOffset() != -1) {
+        fieldsSegment = si.getDocStoreSegment();
+        if (storeCFSReader != null) {
+          storeDir = storeCFSReader;
+        }
       } else {
-        clone.deletedDocs = this.deletedDocs;
+        fieldsSegment = segment;
+        if (cfsReader != null) {
+          storeDir = cfsReader;
+        }
       }
-  
+      if (deletedDocsRef != null) deletedDocsRef.incRef();
+      if (doClone) {
+        clone.deletedDocs = deletedDocs;
+        clone.deletedDocsRef = deletedDocsRef;
+      } else {
+        if (!deletionsUpToDate) {
+          // load deleted docs
+          clone.deletedDocs = null;
+          clone.loadDeletedDocs();
+        } else {
+          clone.deletedDocs = deletedDocs;
+          clone.deletedDocsRef = deletedDocsRef;
+        }
+      }
       clone.norms = new HashMap();
       if (!normsUpToDate) {
         // load norms
@@ -509,8 +633,13 @@
           if (!fieldNormsChanged[i]) {
             String curField = fieldInfos.fieldInfo(i).name;
             Norm norm = (Norm) this.norms.get(curField);
-            norm.incRef();
-            clone.norms.put(curField, norm);
+            if (doClone) {
+              Norm newNorm = (Norm)norm.cloneNorm();
+              clone.norms.put(curField, newNorm);
+            } else {
+              norm.incRef();
+              clone.norms.put(curField, norm);
+            }
           }
         }
         
@@ -520,8 +649,13 @@
         while (it.hasNext()) {
           String field = (String) it.next();
           Norm norm = (Norm) norms.get(field);
-          norm.incRef();
-          clone.norms.put(field, norm);
+          if (doClone) {
+            Norm newNorm = (Norm)norm.cloneNorm();
+            clone.norms.put(field, newNorm);
+          } else {
+            norm.incRef();
+            clone.norms.put(field, norm);
+          }
         }
       }
   
@@ -541,8 +675,23 @@
             }
           }
         }  
-      }    
-  
+      }
+      
+      // NOTE(review): the commented-out block below is a dead earlier draft;
+      // norm cloning is handled in the normsUpToDate branches above — remove it.
+      /**
+      if (doClone) {
+        Iterator it = norms.keySet().iterator();
+        while (it.hasNext()) {
+          String field = (String) it.next();
+          Norm norm = (Norm) norms.get(field);
+          norm.decRef();
+          Norm newNorm = (Norm)norm.clone();
+          clone.norms.put(field, newNorm);
+        }
+      }
+      **/
+      assert deletedDocs == null || deletedDocsRef != null;
       success = true;
     } finally {
       if (this.referencedSegmentReader != null) {
@@ -563,6 +712,7 @@
         // An exception occured during reopen, we have to decRef the norms
         // that we incRef'ed already and close singleNormsStream and FieldsReader
         clone.decRef();
+        // reopen/clone failed; clone.decRef() above released its resources
       }
     }
     
@@ -603,19 +753,35 @@
   FieldsReader getFieldsReader() {
     return (FieldsReader) fieldsReaderLocal.get();
   }
-
+  
+  /**
+   * Decrement the references to the norms, including the byte
+   * references. NOTE(review): bytesRef is decRef'd only while refCount() > 1 — confirm the final reference is released inside Norm.decRef()
+   * @throws IOException
+   */
+  private void decRefNorms() throws IOException {
+    Iterator it = norms.keySet().iterator();
+    while (it.hasNext()) {
+      String field = (String) it.next();
+      Norm norm = (Norm) norms.get(field);
+      norm.decRef();
+      if (norm.bytesRef != null && norm.bytesRef.refCount() > 1) norm.bytesRef.decRef();
+    }
+  }
+  
   protected void doClose() throws IOException {
     boolean hasReferencedReader = (referencedSegmentReader != null);
 
     termVectorsLocal.close();
     fieldsReaderLocal.close();
-
+    
     if (hasReferencedReader) {
       referencedSegmentReader.decRefReaderNotNorms();
       referencedSegmentReader = null;
     }
 
     deletedDocs = null;
+    deletedDocsRef = null;
 
     // close the single norms stream
     if (singleNormStream != null) {
@@ -631,7 +797,7 @@
       if (tis != null) {
         tis.close();
       }
-  
+      
       if (freqStream != null)
         freqStream.close();
       if (proxStream != null)
@@ -676,8 +842,21 @@
   }
 
   protected void doDelete(int docNum) {
-    if (deletedDocs == null)
+    if (deletedDocs == null) {
       deletedDocs = new BitVector(maxDoc());
+      deletedDocsRef = new Ref();
+    }
+    // there is more than 1 SegmentReader with a reference to this
+    // deletedDocs BitVector so decRef the current deletedDocsRef,
+    // clone the BitVector, create a new deletedDocsRef
+    synchronized (deletedDocsRef) {
+      if (deletedDocsRef.refCount() > 1) {
+        Ref oldRef = deletedDocsRef;
+        deletedDocs = cloneDeletedDocs(deletedDocs);
+        deletedDocsRef = new Ref();
+        oldRef.decRef();
+      }
+    }
     deletedDocsDirty = true;
     undeleteAll = false;
     if (!deletedDocs.getAndSet(docNum))
@@ -685,9 +864,16 @@
   }
 
   protected void doUndeleteAll() {
+    deletedDocsDirty = false;
+    undeleteAll = true;
+    if (deletedDocs != null) {
+      assert deletedDocsRef != null;
+      deletedDocsRef.decRef();
       deletedDocs = null;
-      deletedDocsDirty = false;
-      undeleteAll = true;
+      deletedDocsRef = null;
+    } else {
+      assert deletedDocsRef == null;
+    }
   }
 
   List files() throws IOException {
@@ -862,10 +1048,18 @@
     Norm norm = (Norm) norms.get(field);
     if (norm == null)                             // not an indexed field
       return;
-
+    // synchronize on the norm to make sure the cloning doesn't conflict 
+    // with other threads
+    synchronized (norm) {
+      // there is more than this reader referencing this norm's bytes
+      // so clone the bytes for this norm
+      if (norm.bytesRef != null && norm.bytesRef.refCount() > 1) {
+        norm.cloneBytes();
+      }
+    }
     norm.dirty = true;                            // mark it dirty
     normsDirty = true;
-
+    
     norms(field)[doc] = value;                    // set the value
   }
 
@@ -879,7 +1073,7 @@
       System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc());
       return;
     }
-    
+  
     synchronized(norm) {
       if (norm.bytes != null) {                     // can copy from cache
         System.arraycopy(norm.bytes, 0, bytes, offset, maxDoc());
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -33,7 +33,7 @@
  *
  * @version $Id$
  */
-public class MultiReader extends IndexReader {
+public class MultiReader extends IndexReader implements Cloneable {
   protected IndexReader[] subReaders;
   private int[] starts;                           // 1st docno for each segment
   private boolean[] decrefOnClose;                // remember which subreaders to decRef on close
@@ -87,7 +87,7 @@
     }
     starts[subReaders.length] = maxDoc;
   }
-
+  
   /**
    * Tries to reopen the subreaders.
    * <br>
@@ -107,7 +107,35 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+    return doReopen(false);
+  }
+  
+  /**
+   * Clones the subreaders.
+   * (see {@link IndexReader#clone()}).
+   * <br>
+   * <p>
+   * If subreaders are shared, then the reference count of those
+   * readers is increased to ensure that the subreaders remain open
+   * until the last referring reader is closed.
+   */
+  public synchronized Object clone() {
+    try {
+      return doReopen(true);
+    } catch (Exception ex) {
+      throw new RuntimeException("Cloning subreaders failed", ex);
+    }
+  }
+  
+  /**
+   * If doClone is true, each subreader is cloned; otherwise it is reopened.
+   * @param doClone whether to clone (true) or reopen (false) the subreaders
+   * @return a new reader if any subreader changed, otherwise this reader
+   * @throws CorruptIndexException
+   * @throws IOException
+   */
+  protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -117,7 +145,10 @@
     boolean success = false;
     try {
       for (int i = 0; i < subReaders.length; i++) {
-        newSubReaders[i] = subReaders[i].reopen();
+        if (doClone)
+          newSubReaders[i] = (IndexReader)subReaders[i].clone();
+        else
+          newSubReaders[i] = subReaders[i].reopen();
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
         if (newSubReaders[i] != subReaders[i]) {
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -67,7 +67,7 @@
 
  @version $Id$
 */
-public abstract class IndexReader {
+public abstract class IndexReader implements Cloneable {
 
   // NOTE: in 3.0 this will change to true
   final static boolean READ_ONLY_DEFAULT = false;
@@ -329,10 +329,8 @@
    * call is a NOOP and returns this instance. Otherwise, a new instance is 
    * returned. The old instance is <b>not</b> closed and remains usable.<br>
    * <b>Note:</b> The re-opened reader instance and the old instance might share
-   * the same resources. For this reason no index modification operations 
-   * (e. g. {@link #deleteDocument(int)}, {@link #setNorm(int, String, byte)}) 
-   * should be performed using one of the readers until the old reader instance
-   * is closed. <b>Otherwise, the behavior of the readers is undefined.</b> 
+   * the same resources.  Index modification operations may be performed
+   * on the old reader because of copy on write of norms and deleted docs. 
    * <p>   
    * You can determine whether a reader was actually reopened by comparing the
    * old instance with the instance returned by this method: 
@@ -354,6 +352,23 @@
   public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
     throw new UnsupportedOperationException("This reader does not support reopen().");
   }
+  
+  /**
+   * Clones the IndexReader.
+   * <p>
+   * On cloning a reader with pending modifications, the original reader then
+   * becomes readOnly (in addition to passing the write lock to the
+   * cloned reader).  The deletedDocs and norms are cloned on update to the 
+   * cloned IndexReader constituting a "copy on write" (rather than in the clone 
+   * method to avoid excessive duplication).
+   * <p>
+   * The originator of the clone (this) will throw a LockObtainFailedException if 
+   * {@link #deleteDocument(int)}, {@link #setNorm(int, String, byte)} are called
+   * after it is cloned.
+   */
+  public synchronized Object clone() {
+    throw new UnsupportedOperationException("This reader does not support clone().");
+  }
 
   /** 
    * Returns the directory associated with this index.  The Default 
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -33,7 +33,7 @@
 /** 
  * An IndexReader which reads indexes with multiple segments.
  */
-class MultiSegmentReader extends DirectoryIndexReader {
+class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
   protected SegmentReader[] subReaders;
   private int[] starts;                           // 1st docno for each segment
   private Map normsCache = new HashMap();
@@ -71,7 +71,7 @@
   }
 
   /** This contructor is only used for {@link #reopen()} */
-  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean readOnly) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean readOnly, boolean doClone) throws IOException {
     super(directory, infos, closeDirectory, readOnly);
 
     // we put the old SegmentReaders in a map, that allows us
@@ -109,7 +109,7 @@
           // this is a new reader; in case we hit an exception we can close it safely
           newReader = SegmentReader.get(readOnly, infos.info(i));
         } else {
-          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i));
+          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i), doClone);
         }
         if (newReader == newReaders[i]) {
           // this reader will be shared between the old and the new one,
@@ -181,7 +181,7 @@
       }
     }
   }
-
+  
   private void initialize(SegmentReader[] subReaders) {
     this.subReaders = subReaders;
     starts = new int[subReaders.length + 1];    // build starts array
@@ -195,15 +195,15 @@
     starts[subReaders.length] = maxDoc;
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     if (infos.size() == 1) {
       // The index has only one segment now, so we can't refresh the MultiSegmentReader.
       // Return a new [ReadOnly]SegmentReader instead
       return SegmentReader.get(readOnly, infos, infos.info(0), false);
     } else if (readOnly) {
-      return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+      return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false);
+      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false, doClone);
     }            
   }
 
Index: src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java	(revision 729016)
+++ src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java	(working copy)
@@ -27,8 +27,8 @@
     super(directory, sis, closeDirectory, true);
   }
 
-  ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
-    super(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true);
+  ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException {
+    super(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true, doClone);
   }
 
   protected void acquireWriteLock() {
Index: src/java/org/apache/lucene/util/BitVector.java
===================================================================
--- src/java/org/apache/lucene/util/BitVector.java	(revision 729016)
+++ src/java/org/apache/lucene/util/BitVector.java	(working copy)
@@ -35,7 +35,7 @@
 
   @version $Id$
   */
-public final class BitVector {
+public final class BitVector implements Cloneable {
 
   private byte[] bits;
   private int size;
@@ -46,7 +46,19 @@
     size = n;
     bits = new byte[(size >> 3) + 1];
   }
-
+  
+  BitVector(byte[] bits, int size, int count) {
+    this.bits = bits;
+    this.size = size;
+    this.count = count;
+  }
+  
+  public Object clone() {
+    byte[] copyBits = new byte[bits.length];
+    System.arraycopy(bits, 0, copyBits, 0, bits.length);
+    return new BitVector(copyBits, size, count);
+  }
+  
   /** Sets the value of <code>bit</code> to one. */
   public final void set(int bit) {
     if (bit >= size) {
