Index: src/test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
@@ -0,0 +1,78 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
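+/**
+ * Tests cloning of IndexReaders.  A cloned SegmentReader initially shares its
+ * deletedDocs BitVector with the original reader; the vector is only copied
+ * (copy-on-write) once either reader deletes a document.
+ */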
+public class TestIndexReaderClone extends LuceneTestCase {
+  public void testDeletedDocsReferenceCounting() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader origSegmentReader = (SegmentReader)IndexReader.open(dir1);
+    assertDelDocsRefCountEquals(0, origSegmentReader);
+    
+    // delete a document: this creates the deletedDocs BitVector and takes a reference to it
+    origSegmentReader.deleteDocument(1);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    
+    // cloning shares the deletedDocs BitVector, so its ref count goes to 2 (original + clone)
+    SegmentReader clonedSegmentReader = (SegmentReader)origSegmentReader.clone();
+    assertDelDocsRefCountEquals(2, origSegmentReader);
+    // deleting a document in the clone triggers copy-on-write: the clone gets
+    // its own deletedDocs BitVector and each reader's ref count drops back to 1
+    clonedSegmentReader.deleteDocument(2);
+    assertDelDocsRefCountEquals(1, origSegmentReader);
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+    
+    // make sure the deletedDocs BitVectors are now different objects
+    assertTrue(origSegmentReader.deletedDocs != clonedSegmentReader.deletedDocs);
+    
+    assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
+    assertTrue(!origSegmentReader.isDeleted(2)); // doc 2 should not be deleted in original segmentreader
+    assertTrue(clonedSegmentReader.isDeleted(2)); // doc 2 should be deleted in cloned segmentreader
+    
+    
+    // deleting a doc from the original SegmentReader should now fail, because
+    // the write lock is held by the clone
+    try {
+      origSegmentReader.deleteDocument(4);
+      fail("expected LockObtainFailedException");
+    } catch (LockObtainFailedException lofe) {
+      // expected
+    }
+    // closing the original SegmentReader should not affect the clone
+    origSegmentReader.close();
+    clonedSegmentReader.deleteDocument(3);
+    clonedSegmentReader.flush();
+    assertDelDocsRefCountEquals(1, clonedSegmentReader);
+    
+    // test a reopened reader
+    SegmentReader reopenedSegmentReader = (SegmentReader)clonedSegmentReader.reopen();
+    System.out.println("reopenedSegmentReader refcount: "+reopenedSegmentReader.deletedDocsCopyOnWriteRef.refCount());
+    SegmentReader cloneSegmentReader2 = (SegmentReader)reopenedSegmentReader.clone();
+    assertDelDocsRefCountEquals(2, cloneSegmentReader2);
+  }
+  
+  
+  
+  public void testNorms() throws Exception {
+    
+  }
+  
+  public void testDeletedDocsClonedDocsDeletes() throws Exception {
+    final Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    SegmentReader segmentReader = (SegmentReader)IndexReader.open(dir1);
+    segmentReader.deleteDocument(1);
+    
+  }
+  
+  private void assertDocDeleted(SegmentReader reader, SegmentReader reader2, int doc) {
+    assertTrue(reader.isDeleted(doc));
+    assertEquals(reader.isDeleted(doc), reader2.isDeleted(doc));
+  }
+  
+  private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
+    assertEquals(refCount, reader.deletedDocsCopyOnWriteRef.refCount());
+  }
+}

Property changes on: src/test/org/apache/lucene/index/TestIndexReaderClone.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 0)
@@ -0,0 +1,306 @@
+package org.apache.lucene.index;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
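+/**
+ * Tests norms handling for cloned IndexReaders: norms are shared between a
+ * reader and its clones until setNorm is called, and closing one reader must
+ * not close norms still referenced by another.
+ */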
+public class TestIndexReaderCloneNorms extends LuceneTestCase {
+
+  private class SimilarityOne extends DefaultSimilarity {
+    public float lengthNorm(String fieldName, int numTerms) {
+      return 1;
+    }
+  }
+
+  private static final int NUM_FIELDS = 10;
+  
+  private Similarity similarityOne;
+  private Analyzer anlzr;
+  private int numDocNorms;
+  private ArrayList norms; 
+  private ArrayList modifiedNorms; 
+  private float lastNorm = 0;
+  private float normDelta = (float) 0.001;
+
+  public TestIndexReaderCloneNorms(String s) {
+    super(s);
+  }
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    similarityOne = new SimilarityOne();
+    anlzr = new StandardAnalyzer();
+  }
+
+  /**
+   * Test that norm values are preserved as the index is maintained.
+   * Including separate norms.
+   * Including merging indexes with separate norms.
+   * Including optimize. 
+   */
+  public void testNorms() throws IOException {
+    // tmp dir
+    String tempDir = System.getProperty("java.io.tmpdir");
+    if (tempDir == null) {
+      throw new IOException("java.io.tmpdir undefined, cannot run test");
+    }
+    
+    // test with a single index: index1
+    File indexDir1 = new File(tempDir, "lucenetestindex1");
+    Directory dir1 = FSDirectory.getDirectory(indexDir1);
+
+    norms = new ArrayList();
+    modifiedNorms = new ArrayList();
+
+    createIndex(dir1);
+    doTestNorms(dir1);
+
+    // test with a single index: index2
+    ArrayList norms1 = norms;
+    ArrayList modifiedNorms1 = modifiedNorms;
+    int numDocNorms1 = numDocNorms;
+
+    norms = new ArrayList();
+    modifiedNorms = new ArrayList();
+    numDocNorms = 0;
+    
+    File indexDir2 = new File(tempDir, "lucenetestindex2");
+    Directory dir2 = FSDirectory.getDirectory(indexDir2);
+
+    createIndex(dir2);
+    doTestNorms(dir2);
+
+    // add index1 and index2 to a third index: index3
+    File indexDir3 = new File(tempDir, "lucenetestindex3");
+    Directory dir3 = FSDirectory.getDirectory(indexDir3);
+
+    createIndex(dir3);
+    IndexWriter iw = new IndexWriter(dir3,anlzr,false, IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.addIndexes(new Directory[]{dir1,dir2});
+    iw.close();
+    
+    norms1.addAll(norms);
+    norms = norms1;
+    modifiedNorms1.addAll(modifiedNorms);
+    modifiedNorms = modifiedNorms1;
+    numDocNorms += numDocNorms1;
+
+    // test with index3
+    verifyIndex(dir3);
+    doTestNorms(dir3);
+    
+    // now with optimize
+    iw = new IndexWriter(dir3,anlzr,false, IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.optimize();
+    iw.close();
+    verifyIndex(dir3);
+    
+    dir1.close();
+    dir2.close();
+    dir3.close();
+  }
+  
+  // try cloning and reopening the norms
+  private void doTestNorms(Directory dir) throws IOException {
+    for (int i=0; i<5; i++) {
+      System.out.println("doTestNorms i: "+i);
+      addDocs(dir, 12, true);
+      IndexReader ir = IndexReader.open(dir);
+      verifyIndex(ir);
+      modifyNormsForF1(ir);
+      //ir.close();
+      IndexReader irc = ir.clone();
+      //modifyNormsForF1(irc);
+      verifyIndex(irc);
+      irc.close();
+      
+      // cloning should have released the original reader's write lock
+      assertTrue(!IndexWriter.isLocked(dir));
+        
+      //IndexReader irc2 = ir.clone();
+      //irc2.close();
+      addDocs(dir, 12, false);
+      IndexReader irc3 = IndexReader.open(dir);
+      verifyIndex(irc3);
+      modifyNormsForF1(irc3);
+      verifyIndex(irc3);
+      irc3.flush();
+      irc3.close();
+    }
+  }
+  
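+  // Norms reference counting: closing a reader must not close norms that are
+  // still in use by readers cloned or reopened from it.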
+  public void testNormsRefCounting() throws IOException {
+    Directory dir1 = new RAMDirectory();
+    TestIndexReaderReopen.createIndex(dir1, false);
+    
+    SegmentReader reader1 = (SegmentReader) IndexReader.open(dir1);
+    IndexReader modifier = IndexReader.open(dir1);
+    modifier.deleteDocument(0);
+    modifier.close();
+    
+    SegmentReader reader2 = (SegmentReader) reader1.clone();
+    //modifier = IndexReader.open(dir1);
+    
+    reader2.setNorm(1, "field1", 50);
+    reader2.setNorm(1, "field2", 50);
+    //reader2.close();
+    
+    SegmentReader reader3 = (SegmentReader) reader2.clone();
+    modifier = IndexReader.open(dir1);
+    modifier.deleteDocument(2);
+    modifier.close();
+    SegmentReader reader4 = (SegmentReader) reader3.reopen();
+
+    modifier = IndexReader.open(dir1);
+    modifier.deleteDocument(3);
+    modifier.close();
+    SegmentReader reader5 = (SegmentReader) reader3.reopen();
+    
+    // Now reader2-reader5 all reference reader1. reader1 and reader2
+    // share the same norms. reader3, reader4, reader5 also share norms.
+    TestIndexReaderReopen.assertRefCountEquals(5, reader1);
+    assertFalse(reader1.normsClosed());
+    reader1.close();
+    TestIndexReaderReopen.assertRefCountEquals(4, reader1);
+    assertFalse(reader1.normsClosed());
+    reader2.close();
+    TestIndexReaderReopen.assertRefCountEquals(3, reader1);
+    // now the norms for field1 and field2 should be closed
+    assertTrue(reader1.normsClosed("field1"));
+    assertTrue(reader1.normsClosed("field2"));
+    // but the norms for field3 and field4 should still be open
+    assertFalse(reader1.normsClosed("field3"));
+    assertFalse(reader1.normsClosed("field4"));
+    
+    reader3.close();
+    TestIndexReaderReopen.assertRefCountEquals(2, reader1);
+    assertFalse(reader3.normsClosed());
+    reader5.close();
+    TestIndexReaderReopen.assertRefCountEquals(1, reader1);
+    assertFalse(reader3.normsClosed());
+    reader4.close();
+    TestIndexReaderReopen.assertRefCountEquals(0, reader1);
+    
+    // and now all norms that reader1 used should be closed
+    assertTrue(reader1.normsClosed());
+    
+    // now that reader3, reader4 and reader5 are closed,
+    // the norms that those three readers shared should be
+    // closed as well
+    assertTrue(reader3.normsClosed());
+  }
+
+  private void createIndex(Directory dir) throws IOException {
+    IndexWriter iw = new IndexWriter(dir,anlzr,true, IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.setSimilarity(similarityOne);
+    iw.setUseCompoundFile(true);
+    iw.close();
+  }
+  
+  private void modifyNormsForF1(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir);
+    modifyNormsForF1(ir);
+    ir.close();
+  }
+  
+  private void modifyNormsForF1(IndexReader ir) throws IOException {
+    int n = ir.maxDoc();
+    //System.out.println("modifyNormsForF1 maxDoc: "+n);
+    for (int i = 0; i < n; i+=3) { // modify for every third doc
+      int k = (i*3) % modifiedNorms.size();
+      float origNorm = ((Float)modifiedNorms.get(i)).floatValue();
+      float newNorm = ((Float)modifiedNorms.get(k)).floatValue();
+      //System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
+      //System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
+      modifiedNorms.set(i, new Float(newNorm));
+      modifiedNorms.set(k, new Float(origNorm));
+      ir.setNorm(i, "f"+1, newNorm); 
+      ir.setNorm(k, "f"+1, origNorm); 
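+      // NOTE: the break below means only the first (i=0) swap is performed per call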
+      break;
+    }
+    //ir.close();
+  }
+  
+  private void verifyIndex(Directory dir) throws IOException {
+    IndexReader ir = IndexReader.open(dir);
+    verifyIndex(ir);
+    ir.close();
+  }
+
+  private void verifyIndex(IndexReader ir) throws IOException {
+    for (int i = 0; i < NUM_FIELDS; i++) {
+      String field = "f"+i;
+      byte b[] = ir.norms(field);
+      assertEquals("number of norms mismatches",numDocNorms,b.length);
+      ArrayList storedNorms = (i==1 ? modifiedNorms : norms);
+      for (int j = 0; j < b.length; j++) {
+        float norm = Similarity.decodeNorm(b[j]);
+        float norm1 = ((Float)storedNorms.get(j)).floatValue();
+        assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
+      }
+    }
+  }
+
+  private void addDocs(Directory dir, int ndocs, boolean compound) throws IOException {
+    IndexWriter iw = new IndexWriter(dir,anlzr,false, IndexWriter.MaxFieldLength.LIMITED);
+    iw.setMaxBufferedDocs(5);
+    iw.setMergeFactor(3);
+    iw.setSimilarity(similarityOne);
+    iw.setUseCompoundFile(compound);
+    for (int i = 0; i < ndocs; i++) {
+      iw.addDocument(newDoc());
+    }
+    iw.close();
+  }
+
+  // create the next document
+  private Document newDoc() {
+    Document d = new Document();
+    float boost = nextNorm();
+    for (int i = 0; i < 10; i++) {
+      Field f = new Field("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
+      f.setBoost(boost);
+      d.add(f);
+    }
+    return d;
+  }
+
+  // return unique norm values that are unchanged by encoding/decoding
+  private float nextNorm() {
+    float norm = lastNorm + normDelta;
+    do {
+      float norm1 = Similarity.decodeNorm(Similarity.encodeNorm(norm));
+      if (norm1 > lastNorm) {
+        //System.out.println(norm1+" > "+lastNorm);
+        norm = norm1;
+        break;
+      }
+      norm += normDelta;
+    } while (true);
+    norms.add(numDocNorms, new Float(norm));
+    modifiedNorms.add(numDocNorms, new Float(norm));
+    //System.out.println("creating norm("+numDocNorms+"): "+norm);
+    numDocNorms ++;
+    lastNorm = (norm>10 ? 0 : norm); //there's a limit to how many distinct values can be stored in a single byte
+    return norm;
+  }
+  
+}
\ No newline at end of file

Property changes on: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
___________________________________________________________________
Name: svn:mime-type
   + text/plain
Name: svn:keywords
   + "Date Rev Author URL Id"
Name: svn:eol-style
   + native

Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(revision 725789)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(working copy)
@@ -852,7 +852,7 @@
     }
   }
   
-  private static void createIndex(Directory dir, boolean multiSegment) throws IOException {
+  public static void createIndex(Directory dir, boolean multiSegment) throws IOException {
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     
     w.setMergePolicy(new LogDocMergePolicy());
@@ -978,7 +978,7 @@
     }
   }
 
-  private void assertRefCountEquals(int refCount, IndexReader reader) {
+  public static void assertRefCountEquals(int refCount, IndexReader reader) {
     assertEquals("Reader has wrong refCount value.", refCount, reader.getRefCount());
   }
 
Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 725789)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -36,7 +36,7 @@
  * "own" the directory, which means that they try to acquire a write lock
  * whenever index modifications are performed.
  */
-abstract class DirectoryIndexReader extends IndexReader {
+abstract class DirectoryIndexReader extends IndexReader implements Cloneable {
   protected Directory directory;
   protected boolean closeDirectory;
   private IndexDeletionPolicy deletionPolicy;
@@ -138,23 +138,39 @@
 
     return reader;
   }
-
+  
   public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+    return doReopen(false);
+  }
+  
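+  /** Clones this reader by running the reopen logic with doClone=true;
+   *  an IOException during cloning is rethrown as a RuntimeException. */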
+  public final synchronized IndexReader clone() {
+    try {
+      return doReopen(true);
+    } catch (IOException ex) {
+      throw new RuntimeException("clone failed: " + ex, ex);
+    }
+  }
+  
+  protected final synchronized IndexReader doReopen(final boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
-
-    if (this.hasChanges || this.isCurrent()) {
+    // when cloning we always create a new reader, even if nothing has changed
+    if (!doClone && (this.hasChanges || this.isCurrent())) {
       // this has changes, therefore we have the lock and don't need to reopen
       // OR: the index in the directory hasn't changed - nothing to do here
       return this;
     }
-
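+    // when cloning, give up this reader's write lock so that later
+    // modifications (by the original or the clone) re-acquire it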
+    if (doClone && writeLock != null) {
+      writeLock.release();
+      writeLock = null;
+    }
     final SegmentInfos.FindSegmentsFile finder = new SegmentInfos.FindSegmentsFile(directory) {
 
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
         SegmentInfos infos = new SegmentInfos();
         infos.read(directory, segmentFileName);
 
-        DirectoryIndexReader newReader = doReopen(infos);
+        DirectoryIndexReader newReader = doReopen(infos, doClone);
         
         if (DirectoryIndexReader.this != newReader) {
           newReader.init(directory, infos, closeDirectory, readOnly);
@@ -197,7 +213,7 @@
   /**
    * Re-opens the index using the passed-in SegmentInfos 
    */
-  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException;
+  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException;
   
   public void setDeletionPolicy(IndexDeletionPolicy deletionPolicy) {
     this.deletionPolicy = deletionPolicy;
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 725789)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -54,6 +54,8 @@
   CloseableThreadLocal termVectorsLocal = new CloseableThreadLocal();
 
   BitVector deletedDocs = null;
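+  // counts how many readers (this one plus clones) currently share deletedDocs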
+  Ref deletedDocsCopyOnWriteRef = new Ref();
+  private Ref writeLockRef = new Ref();
   private boolean deletedDocsDirty = false;
   private boolean normsDirty = false;
   private boolean undeleteAll = false;
@@ -64,7 +66,9 @@
   private boolean rollbackUndeleteAll = false;
   private int rollbackPendingDeleteCount;
   private boolean readOnly;
-
+  private boolean isCloned = false;   // true once this reader has been cloned
+  private boolean isClone = false;    // true if this reader is itself a clone
+  protected boolean releaseLockOnClone = true;
   IndexInput freqStream;
   IndexInput proxStream;
 
@@ -79,7 +83,28 @@
   // in case this is a re-opened reader
   private SegmentReader referencedSegmentReader = null;
   
-  private class Norm {
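+  /**
+   * Simple reference counter used for copy-on-write sharing of the
+   * deletedDocs BitVector between a SegmentReader and its clones.
+   */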
+  static class Ref {
+    private volatile int refCount;
+    
+    public synchronized int refCount() {
+      return refCount;
+    }
+    
+    public synchronized void incRef() {
+      //assert refCount > 0;
+      refCount++;
+    }
+
+    public synchronized void decRef() {
+      assert refCount > 0;
+      //if (refCount == 1) {
+      //  close();
+      // }
+      refCount--;
+    }
+  }
+  
+  private class Norm implements Cloneable {
     volatile int refCount;
     boolean useSingleNormStream;
     
@@ -112,6 +137,24 @@
     private int number;
     private long normSeek;
     private boolean rollbackDirty;
+    
+    private Norm(boolean useSingleNormStream, byte[] bytes, int number, boolean dirty, long normSeek, boolean rollbackDirty, IndexInput in) {
+      this.useSingleNormStream = useSingleNormStream;
+      this.bytes = bytes;
+      this.number = number;
+      this.dirty = dirty;
+      this.normSeek = normSeek;
+      this.rollbackDirty = rollbackDirty;
+      this.in = in;
+    }
+    
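+    // deep-copies the cached norm bytes and the underlying IndexInput so a
+    // cloned reader can change its norms independently; assumes the norms
+    // have already been loaded (bytes and in are non-null)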
+    public synchronized Object clone() {
+      byte[] cloneBytes = new byte[bytes.length];
+      System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
+      // is it necessary to clone everything?
+      Norm norm = new Norm(useSingleNormStream, cloneBytes, number, dirty, normSeek, rollbackDirty, (IndexInput)in.clone());
+      return norm;
+    }
 
     private void reWrite(SegmentInfo si) throws IOException {
       // NOTE: norms are re-written in regular directory, not cfs
@@ -400,6 +443,8 @@
     // NOTE: the bitvector is stored using the regular directory, not cfs
     if (hasDeletions(si)) {
       deletedDocs = new BitVector(directory(), si.getDelFileName());
+      deletedDocsCopyOnWriteRef = new Ref();
+      deletedDocsCopyOnWriteRef.incRef();
      
       assert si.getDelCount() == deletedDocs.count() : 
         "delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
@@ -413,13 +458,23 @@
       assert si.getDelCount() == 0;
   }
   
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  
+  protected BitVector cloneBitVector(BitVector bv) {
+    return (BitVector)bv.clone();
+  }
+  
+  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     DirectoryIndexReader newReader;
     
     if (infos.size() == 1) {
       SegmentInfo si = infos.info(0);
       if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
-        newReader = reopenSegment(si);
+        newReader = reopenSegment(si, doClone);
       } else { 
         // segment not referenced anymore, reopen not possible
         // or segment format changed
@@ -427,20 +482,28 @@
       }
     } else {
       if (readOnly)
-        return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
+        return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
       else
-        return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false);
+        return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false, doClone);
     }
     
     return newReader;
   }
   
-  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
+  synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException {
     boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) 
                                   && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
     boolean normsUpToDate = true;
-
-    
+    /**
+    System.out.println("reopenSegment doClone: "+doClone);
+    if (doClone && releaseLockOnClone) {
+      if (writeLock != null) {
+        
+        writeLock.release();
+        writeLock = null;
+      }
+    }
+    **/
     boolean[] fieldNormsChanged = new boolean[fieldInfos.size()];
     if (normsUpToDate) {
       for (int i = 0; i < fieldInfos.size(); i++) {
@@ -450,18 +513,24 @@
         }
       }
     }
-
-    if (normsUpToDate && deletionsUpToDate) {
+    // if we're cloning we need to run through the reopenSegment logic
+    if (normsUpToDate && deletionsUpToDate && !doClone) {
       return this;
     }    
-    
-
+     
+    if (doClone && normsDirty) {
+      normsUpToDate = false;
+    }
       // clone reader
     SegmentReader clone;
-    if (readOnly) 
-      clone = new ReadOnlySegmentReader();
-    else
-      clone = new SegmentReader();
+    try {
+      if (readOnly)
+        clone = (SegmentReader)READONLY_IMPL.newInstance();
+      else
+        clone = (SegmentReader)IMPL.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("cannot load SegmentReader class: " + e, e);
+    }
 
     boolean success = false;
     try {
@@ -503,16 +572,21 @@
         clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
                                         si.getDocStoreOffset(), si.docCount);
       }
-      
-      
-      if (!deletionsUpToDate) {
-        // load deleted docs
-        clone.deletedDocs = null;
-        clone.loadDeletedDocs();
+      deletedDocsCopyOnWriteRef.incRef();
+      if (doClone) {
+        clone.deletedDocs = deletedDocs;
+        isCloned = true; // mark this segmentreader as cloned
+        clone.isClone = true;
       } else {
-        clone.deletedDocs = this.deletedDocs;
+        if (!deletionsUpToDate) {
+          // load deleted docs
+          clone.deletedDocs = null;
+          clone.loadDeletedDocs();
+        } else {
+          clone.deletedDocs = deletedDocs;
+        }
       }
-  
+      clone.deletedDocsCopyOnWriteRef = deletedDocsCopyOnWriteRef;
       clone.norms = new HashMap();
       if (!normsUpToDate) {
         // load norms
@@ -615,7 +689,14 @@
   FieldsReader getFieldsReader() {
     return fieldsReader;
   }
-
+  
+  //protected void acquireWriteLock() throws IOException {
+  //  System.out.println("acquireWriteLock ref count: "+this.writeLockRef.refCount+" isCloned: "+isCloned);
+  //  if (isCloned) throw new UnsupportedOperationException("this reader has been cloned and no updates are allowed");
+  //  if (writeLockRef.refCount() == 0) super.acquireWriteLock();
+    
+  //}
+  
   protected void doClose() throws IOException {
     boolean hasReferencedReader = (referencedSegmentReader != null);
 
@@ -689,8 +770,20 @@
   }
 
   protected void doDelete(int docNum) {
-    if (deletedDocs == null)
+    if (deletedDocs == null) {
       deletedDocs = new BitVector(maxDoc());
+      deletedDocsCopyOnWriteRef = new Ref();
+      deletedDocsCopyOnWriteRef.incRef();
+    }
+    // there is more than 1 SegmentReader with a reference to this
+    // deletedDocs BitVector so decRef the current copyOnWriteRef,
+    // clone the BitVector, create a new copyOnWriteRef
+    if (deletedDocsCopyOnWriteRef.refCount() > 1) {
+      deletedDocsCopyOnWriteRef.decRef();
+      deletedDocs = cloneBitVector(deletedDocs);
+      deletedDocsCopyOnWriteRef = new Ref();
+      deletedDocsCopyOnWriteRef.incRef();
+    }
     deletedDocsDirty = true;
     undeleteAll = false;
     if (!deletedDocs.getAndSet(docNum))
@@ -875,10 +968,18 @@
     Norm norm = (Norm) norms.get(field);
     if (norm == null)                             // not an indexed field
       return;
-
+    // copy-on-write: this Norm may still be shared with a clone of this
+    // SegmentReader, so give this reader its own copy before changing it
+    if (norm.refCount > 1 && isCloned) {
+      Norm clonedNorm = (Norm)norm.clone();
+      norm.decRef();       // 1 less ref to this norm
+      clonedNorm.incRef(); // add a ref to the cloned norm
+      norms.put(field, clonedNorm);
+      norm = clonedNorm;
+    }
     norm.dirty = true;                            // mark it dirty
     normsDirty = true;
-
+    
     norms(field)[doc] = value;                    // set the value
   }
 
@@ -892,7 +993,7 @@
       System.arraycopy(fakeNorms(), 0, bytes, offset, maxDoc());
       return;
     }
-    
+  
     synchronized(norm) {
       if (norm.bytes != null) {                     // can copy from cache
         System.arraycopy(norm.bytes, 0, bytes, offset, maxDoc());
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java	(revision 725789)
+++ src/java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -67,7 +67,7 @@
 
  @version $Id$
 */
-public abstract class IndexReader {
+public abstract class IndexReader implements Cloneable {
 
   // NOTE: in 3.0 this will change to true
   final static boolean READ_ONLY_DEFAULT = false;
@@ -354,6 +354,10 @@
   public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
     throw new UnsupportedOperationException("This reader does not support reopen().");
   }
+  
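+  /**
+   * Returns a clone of this reader that can be modified independently;
+   * readers that do not support cloning throw
+   * {@link UnsupportedOperationException}.
+   */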
+  public synchronized IndexReader clone() {
+    throw new UnsupportedOperationException("This reader does not support clone().");
+  }
 
   /** 
    * Returns the directory associated with this index.  The Default 
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 725789)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -33,7 +33,7 @@
 /** 
  * An IndexReader which reads indexes with multiple segments.
  */
-class MultiSegmentReader extends DirectoryIndexReader {
+class MultiSegmentReader extends DirectoryIndexReader implements Cloneable {
   protected SegmentReader[] subReaders;
   private int[] starts;                           // 1st docno for each segment
   private Map normsCache = new HashMap();
@@ -71,7 +71,7 @@
   }
 
   /** This contructor is only used for {@link #reopen()} */
-  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean readOnly) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean readOnly, boolean doClone) throws IOException {
     super(directory, infos, closeDirectory, readOnly);
 
     // we put the old SegmentReaders in a map, that allows us
@@ -109,7 +109,7 @@
           // this is a new reader; in case we hit an exception we can close it safely
           newReader = SegmentReader.get(readOnly, infos.info(i));
         } else {
-          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i));
+          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i), doClone);
         }
         if (newReader == newReaders[i]) {
           // this reader will be shared between the old and the new one,
@@ -181,7 +181,7 @@
       }
     }
   }
-
+  
   private void initialize(SegmentReader[] subReaders) {
     this.subReaders = subReaders;
     starts = new int[subReaders.length + 1];    // build starts array
@@ -195,15 +195,15 @@
     starts[subReaders.length] = maxDoc;
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     if (infos.size() == 1) {
       // The index has only one segment now, so we can't refresh the MultiSegmentReader.
       // Return a new [ReadOnly]SegmentReader instead
       return SegmentReader.get(readOnly, infos, infos.info(0), false);
     } else if (readOnly) {
-      return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+      return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false);
+      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, false, doClone);
     }            
   }
 
Index: src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java	(revision 725789)
+++ src/java/org/apache/lucene/index/ReadOnlyMultiSegmentReader.java	(working copy)
@@ -27,8 +27,8 @@
     super(directory, sis, closeDirectory, true);
   }
 
-  ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
-    super(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true);
+  ReadOnlyMultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException {
+    super(directory, infos, closeDirectory, oldReaders, oldStarts, oldNormsCache, true, doClone);
   }
 
   protected void acquireWriteLock() {
Index: src/java/org/apache/lucene/util/BitVector.java
===================================================================
--- src/java/org/apache/lucene/util/BitVector.java	(revision 725789)
+++ src/java/org/apache/lucene/util/BitVector.java	(working copy)
@@ -35,7 +35,7 @@
 
   @version $Id$
   */
-public final class BitVector {
+public final class BitVector implements Cloneable {
 
   private byte[] bits;
   private int size;
@@ -46,7 +46,19 @@
     size = n;
     bits = new byte[(size >> 3) + 1];
   }
-
+  
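+  /** Constructs a BitVector backed by the given bits array (not copied), with the given size and set-bit count. */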
+  BitVector(byte[] bits, int size, int count) {
+    this.bits = bits;
+    this.size = size;
+    this.count = count;
+  }
+  
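+  /** Returns a copy of this BitVector with its own copy of the underlying byte array. */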
+  public Object clone() {
+    byte[] copyBits = new byte[bits.length];
+    System.arraycopy(bits, 0, copyBits, 0, bits.length);
+    return new BitVector(copyBits, size, count);
+  }
+  
   /** Sets the value of <code>bit</code> to one. */
   public final void set(int bit) {
     if (bit >= size) {
