Index: java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -112,23 +112,32 @@
       return (DirectoryIndexReader) finder.doBody(commit.getSegmentsFileName());
     }
   }
-
-  public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+  
+  protected final synchronized IndexReader doReopenOrClone(final boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
 
-    if (this.hasChanges || this.isCurrent()) {
-      // this has changes, therefore we have the lock and don't need to reopen
-      // OR: the index in the directory hasn't changed - nothing to do here
-      return this;
+    if (doClone) {
+      if (hasChanges)
+        throw new IllegalStateException("cannot clone() a reader with uncommitted changes");
+    } else {
+      if (hasChanges || isCurrent())
+        // If we have changes we have the write lock and we
+        // are already "current", or the index in the
+        // directory hasn't changed - nothing to do here
+        return this;
     }
 
     return (DirectoryIndexReader) new SegmentInfos.FindSegmentsFile(directory) {
 
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
-        SegmentInfos infos = new SegmentInfos();
-        infos.read(directory, segmentFileName);
-
-        DirectoryIndexReader newReader = doReopen(infos);
+        SegmentInfos infos = null;
+        if (doClone) {
+          infos = (SegmentInfos) segmentInfos.clone();
+        } else {
+          infos = new SegmentInfos();
+          infos.read(directory, segmentFileName);
+        }
+        DirectoryIndexReader newReader = doReopenOrClone(infos, doClone);
         
         if (DirectoryIndexReader.this != newReader) {
           newReader.init(directory, infos, closeDirectory);
@@ -143,7 +152,7 @@
   /**
    * Re-opens the index using the passed-in SegmentInfos 
    */
-  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException;
+  protected abstract DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException;
   
   public void setDeletionPolicy(IndexDeletionPolicy deletionPolicy) {
     this.deletionPolicy = deletionPolicy;
Index: java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- java/org/apache/lucene/index/FieldsReader.java	(revision 674202)
+++ java/org/apache/lucene/index/FieldsReader.java	(working copy)
@@ -48,6 +48,7 @@
   private final IndexInput fieldsStream;
 
   private final IndexInput indexStream;
+  private final IndexInput cloneableIndexStream;
   private int numTotalDocs;
   private int size;
   private boolean closed;
@@ -75,8 +76,8 @@
       fieldInfos = fn;
 
       cloneableFieldsStream = d.openInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
-      indexStream = d.openInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
-
+      cloneableIndexStream = d.openInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
+      indexStream = (IndexInput)cloneableIndexStream.clone();
       // First version of fdx did not include a format
       // header, but, the first int will always be 0 in that
       // case
@@ -97,7 +98,7 @@
 
       if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
         cloneableFieldsStream.setModifiedUTF8StringsMode();
-
+      
       fieldsStream = (IndexInput) cloneableFieldsStream.clone();
 
       final long indexSize = indexStream.length()-formatSize;
@@ -130,6 +131,36 @@
   }
 
   /**
+   * Used only by clone(): shares the cloneable master streams and derives fresh per-instance views.
+   * @param fieldInfos shared field metadata
+   * @param cloneableFieldsStream master stream for the stored-fields (.fdt) file
+   * @param cloneableIndexStream master stream for the fields-index (.fdx) file
+   * @param numTotalDocs total number of documents in the doc store
+   * @param size number of documents visible to this reader
+   * @param format stored-fields format version read from the index
+   * @param formatSize size in bytes of the format header
+   * @param docStoreOffset document offset into the shared doc store, or -1 if none
+   */
+  private FieldsReader(FieldInfos fieldInfos, IndexInput cloneableFieldsStream, 
+      IndexInput cloneableIndexStream, int numTotalDocs, int size, int format, int formatSize, int docStoreOffset) {
+    this.fieldInfos = fieldInfos;
+    this.cloneableFieldsStream = cloneableFieldsStream;
+    this.cloneableIndexStream = cloneableIndexStream;
+    this.fieldsStream = (IndexInput)cloneableFieldsStream.clone();
+    this.indexStream = (IndexInput)cloneableIndexStream.clone();
+    this.numTotalDocs = numTotalDocs;
+    this.size = size;
+    this.format = format;
+    this.formatSize = formatSize;
+    this.docStoreOffset = docStoreOffset;
+  }
+  
+  public Object clone() {
+    return new FieldsReader(fieldInfos, (IndexInput)cloneableFieldsStream.clone(), 
+                            (IndexInput)cloneableIndexStream.clone(), numTotalDocs, size, format, formatSize, docStoreOffset);
+  }
+  
+  /**
    * @throws AlreadyClosedException if this FieldsReader is closed
    */
   protected final void ensureOpen() throws AlreadyClosedException {
Index: java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- java/org/apache/lucene/index/FilterIndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/FilterIndexReader.java	(working copy)
@@ -104,6 +104,10 @@
     this.in = in;
   }
 
+  public Object clone() {
+    return new FilterIndexReader(in);
+  }
+
   public Directory directory() {
     return in.directory();
   }
Index: java/org/apache/lucene/index/IndexReader.java
===================================================================
--- java/org/apache/lucene/index/IndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -54,7 +54,7 @@
 
  @version $Id$
 */
-public abstract class IndexReader {
+public abstract class IndexReader implements Cloneable {
 
   /**
    * Constants describing field properties, for example used for
@@ -270,9 +270,21 @@
    * @throws IOException if there is a low-level IO error
    */  
   public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
-    throw new UnsupportedOperationException("This reader does not support reopen().");
+    return doReopenOrClone(false);
   }
+  
+  public Object clone() {
+    try {
+      return doReopenOrClone(true);
+    } catch (Exception exception) {
+      throw new RuntimeException(exception);
+    }
+  }
 
+  protected IndexReader doReopenOrClone(boolean forced) throws CorruptIndexException, IOException {
+    throw new UnsupportedOperationException("This reader does not support reopen() or clone()");  
+  }
+
   /** 
    * Returns the directory associated with this index.  The Default 
    * implementation returns the directory specified by subclasses when 
Index: java/org/apache/lucene/index/MultiReader.java
===================================================================
--- java/org/apache/lucene/index/MultiReader.java	(revision 674202)
+++ java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -88,7 +88,7 @@
   }
 
   /**
-   * Tries to reopen the subreaders.
+   * Tries to reopen or clone the subreaders.
    * <br>
    * If one or more subreaders could be re-opened (i. e. subReader.reopen() 
    * returned a new instance != subReader), then a new MultiReader instance 
@@ -106,7 +106,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -116,7 +116,10 @@
     boolean success = false;
     try {
       for (int i = 0; i < subReaders.length; i++) {
-        newSubReaders[i] = subReaders[i].reopen();
+        if (doClone)
+          newSubReaders[i] = (IndexReader) subReaders[i].clone();
+        else 
+          newSubReaders[i] = subReaders[i].reopen();
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
         if (newSubReaders[i] != subReaders[i]) {
Index: java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- java/org/apache/lucene/index/MultiSegmentReader.java	(revision 674202)
+++ java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -70,7 +70,7 @@
   }
 
   /** This contructor is only used for {@link #reopen()} */
-  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException {
     super(directory, infos, closeDirectory);
     
     // we put the old SegmentReaders in a map, that allows us
@@ -108,7 +108,7 @@
           // this is a new reader; in case we hit an exception we can close it safely
           newReader = SegmentReader.get(infos.info(i));
         } else {
-          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i));
+          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i), doClone);
         }
         if (newReader == newReaders[i]) {
           // this reader will be shared between the old and the new one,
@@ -193,14 +193,14 @@
     starts[subReaders.length] = maxDoc;
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     if (infos.size() == 1) {
       // The index has only one segment now, so we can't refresh the MultiSegmentReader.
       // Return a new SegmentReader instead
       SegmentReader newReader = SegmentReader.get(infos, infos.info(0), false);
       return newReader;
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
     }            
   }
 
Index: java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- java/org/apache/lucene/index/ParallelReader.java	(revision 674202)
+++ java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -142,7 +142,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -155,7 +155,11 @@
     
       for (int i = 0; i < readers.size(); i++) {
         IndexReader oldReader = (IndexReader) readers.get(i);
-        IndexReader newReader = oldReader.reopen();
+        final IndexReader newReader;
+        if (doClone)
+          newReader = (IndexReader) oldReader.clone();
+        else
+          newReader = oldReader.reopen();
         newReaders.add(newReader);
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
Index: java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- java/org/apache/lucene/index/SegmentReader.java	(revision 674202)
+++ java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -40,33 +40,35 @@
  * @version $Id$
  */
 class SegmentReader extends DirectoryIndexReader {
-  private String segment;
-  private SegmentInfo si;
-  private int readBufferSize;
+  protected String segment;
+  protected SegmentInfo si;
+  protected int readBufferSize;
 
   FieldInfos fieldInfos;
-  private FieldsReader fieldsReader;
+  protected FieldsReader fieldsReaderCloneable;
 
   TermInfosReader tis;
   TermVectorsReader termVectorsReaderOrig = null;
-  ThreadLocal termVectorsLocal = new ThreadLocal();
-
+  TermVectorsReaderLocal termVectorsLocal;
+  FieldsReaderLocal fieldsReaderLocal;
+    
   BitVector deletedDocs = null;
-  private boolean deletedDocsDirty = false;
-  private boolean normsDirty = false;
-  private boolean undeleteAll = false;
-  private int pendingDeleteCount;
+  CopyOnWriteRef deletedDocsCopyOnWriteRef;
+  protected boolean deletedDocsDirty = false;
+  protected boolean normsDirty = false;
+  protected boolean undeleteAll = false;
+  protected int pendingDeleteCount;
 
-  private boolean rollbackDeletedDocsDirty = false;
-  private boolean rollbackNormsDirty = false;
-  private boolean rollbackUndeleteAll = false;
-  private int rollbackPendingDeleteCount;
+  protected boolean rollbackDeletedDocsDirty = false;
+  protected boolean rollbackNormsDirty = false;
+  protected boolean rollbackUndeleteAll = false;
+  protected int rollbackPendingDeleteCount;
 
   IndexInput freqStream;
   IndexInput proxStream;
 
   // optionally used for the .nrm file shared by multiple norms
-  private IndexInput singleNormStream;
+  protected IndexInput singleNormStream;
 
   // Compound File Reader when based on a compound file segment
   CompoundFileReader cfsReader = null;
@@ -74,9 +76,31 @@
   
   // indicates the SegmentReader with which the resources are being shared,
   // in case this is a re-opened reader
-  private SegmentReader referencedSegmentReader = null;
+  protected SegmentReader referencedSegmentReader = null;
   
-  private class Norm {
+  protected class CopyOnWriteRef {
+    private int refCount = 0;
+    
+    public synchronized int refCount() {
+      return refCount;
+    }
+    
+    public synchronized void incRef() {
+      refCount++;
+    }
+    
+    public synchronized void decRef() {
+      refCount--;
+    }
+  }
+  
+  protected byte[] cloneNormBytes(byte[] bytes) {
+    byte[] cloneBytes = new byte[bytes.length];
+    System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
+    return cloneBytes;
+  }
+  
+  protected class Norm implements Cloneable {
     volatile int refCount;
     boolean useSingleNormStream;
     
@@ -94,6 +118,28 @@
 
     }
     
+    public void copyBytes() {
+      assert copyOnWriteRef != null;
+      if (bytes != null) {
+        byte[] cloneBytes = cloneNormBytes(bytes);
+        bytes = cloneBytes;
+      }
+    }
+    
+    public Norm cloneRefBytes() {
+      Norm clone = new Norm(in, useSingleNormStream, number, normSeek);
+      clone.bytes = bytes;
+      return clone;
+    }
+    
+    public Object clone() {
+      Norm clone = new Norm(in, useSingleNormStream, number, normSeek);
+      if (bytes != null) {
+        clone.bytes = cloneNormBytes(bytes);
+      }
+      return clone;
+    }
+    
     public Norm(IndexInput in, boolean useSingleNormStream, int number, long normSeek)
     {
       refCount = 1;
@@ -103,14 +149,15 @@
       this.useSingleNormStream = useSingleNormStream;
     }
 
-    private IndexInput in;
-    private byte[] bytes;
-    private boolean dirty;
-    private int number;
-    private long normSeek;
-    private boolean rollbackDirty;
+    protected IndexInput in;
+    protected byte[] bytes;
+    protected boolean dirty;
+    protected int number;
+    protected long normSeek;
+    protected boolean rollbackDirty;
+    protected CopyOnWriteRef copyOnWriteRef;
 
-    private void reWrite(SegmentInfo si) throws IOException {
+    protected void reWrite(SegmentInfo si) throws IOException {
       // NOTE: norms are re-written in regular directory, not cfs
       si.advanceNormGen(this.number);
       IndexOutput out = directory().createOutput(si.getNormFileName(this.number));
@@ -126,7 +173,7 @@
      * It is still valid to access all other norm properties after close is called.
      * @throws IOException
      */
-    private synchronized void close() throws IOException {
+    protected synchronized void close() throws IOException {
       if (in != null && !useSingleNormStream) {
         in.close();
       }
@@ -265,6 +312,10 @@
     return instance;
   }
 
+  /** Hook allowing a subclass to perform its own additional initialization. */
+  protected void doInitialize() {
+  }
+  
   private void initialize(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
     segment = si.name;
     this.si = si;
@@ -306,12 +357,12 @@
         fieldsSegment = segment;
 
       if (doOpenStores) {
-        fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
+        fieldsReaderCloneable = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
                                         si.getDocStoreOffset(), si.docCount);
-
+        fieldsReaderLocal = new FieldsReaderLocal(storeDir, fieldsSegment, fieldsReaderCloneable);
         // Verify two sources of "maxDoc" agree:
-        if (si.getDocStoreOffset() == -1 && fieldsReader.size() != si.docCount) {
-          throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.size() + " but segmentInfo shows " + si.docCount);
+        if (si.getDocStoreOffset() == -1 && fieldsReaderCloneable.size() != si.docCount) {
+          throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReaderCloneable.size() + " but segmentInfo shows " + si.docCount);
         }
       }
 
@@ -332,7 +383,9 @@
         else
           vectorsSegment = segment;
         termVectorsReaderOrig = new TermVectorsReader(storeDir, vectorsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
+        termVectorsLocal = new TermVectorsReaderLocal(termVectorsReaderOrig);
       }
+      doInitialize();
       success = true;
     } finally {
 
@@ -364,26 +417,36 @@
       assert si.getDelCount() == 0;
   }
   
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
+
+    if (deletedDocsDirty || normsDirty) {
+      throw new IllegalStateException("Cannot clone IndexReader with pending changes deletedDocsDirty: "+deletedDocsDirty+" normsDirty: "+normsDirty);
+    }
+
     DirectoryIndexReader newReader;
     
     if (infos.size() == 1) {
       SegmentInfo si = infos.info(0);
       if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
-        newReader = reopenSegment(si);
+        newReader = reopenSegment(si, doClone);
       } else { 
         // segment not referenced anymore, reopen not possible
         // or segment format changed
         newReader = SegmentReader.get(infos, infos.info(0), false);
       }
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
+      return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
     }
     
     return newReader;
   }
   
-  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
+  protected BitVector cloneDeletedDocs(BitVector deletedDocs) {
+    if (this.deletedDocs == null) return null;
+    return (BitVector)deletedDocs.clone();
+  }
+  
+  synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException {
     boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) 
                                   && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
     boolean normsUpToDate = true;
@@ -399,13 +462,16 @@
       }
     }
 
-    if (normsUpToDate && deletionsUpToDate) {
+    if ((normsUpToDate && deletionsUpToDate) && !doClone) {
       return this;
     }    
     
-
-      // clone reader
-    SegmentReader clone = new SegmentReader();
+    SegmentReader clone;
+    try {
+      clone = (SegmentReader)IMPL.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("cannot load SegmentReader class: " + e, e);
+    }
     boolean success = false;
     try {
       clone.directory = directory;
@@ -414,13 +480,14 @@
       clone.readBufferSize = readBufferSize;
       clone.cfsReader = cfsReader;
       clone.storeCFSReader = storeCFSReader;
-  
+      clone.fieldsReaderCloneable = fieldsReaderCloneable;
       clone.fieldInfos = fieldInfos;
       clone.tis = tis;
       clone.freqStream = freqStream;
       clone.proxStream = proxStream;
       clone.termVectorsReaderOrig = termVectorsReaderOrig;
-  
+      clone.fieldsReaderLocal = fieldsReaderLocal;
+      clone.termVectorsLocal = termVectorsLocal;
       
       // we have to open a new FieldsReader, because it is not thread-safe
       // and can thus not be shared among multiple SegmentReaders
@@ -440,13 +507,7 @@
           storeDir = cfsReader;
         }
       }
-  
-      if (fieldsReader != null) {
-        clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
-                                        si.getDocStoreOffset(), si.docCount);
-      }
       
-      
       if (!deletionsUpToDate) {
         // load deleted docs
         clone.deletedDocs = null;
@@ -455,7 +516,7 @@
         clone.deletedDocs = this.deletedDocs;
       }
   
-      clone.norms = new HashMap();
+      // clone.norms is already initialized at its field declaration; no reassignment needed here
       if (!normsUpToDate) {
         // load norms
         for (int i = 0; i < fieldNormsChanged.length; i++) {
@@ -495,8 +556,37 @@
             }
           }
         }  
-      }    
-  
+      } 
+      // Make the norms copy-on-write and wrap each in a new Norm
+      // object, so the norms map itself needs no synchronization.
+      // The original and the clone initially share the same byte
+      // array; whichever side writes first must copy the bytes.
+      // Without copy-on-write, modifications made through this
+      // reader would become visible to the cloned reader, violating
+      // the clone contract: a clone must be a snapshot of this
+      // reader's state at the moment clone() was called.
+      if (doClone) {
+        if (clone.deletedDocs != null) {
+          deletedDocsCopyOnWriteRef = new CopyOnWriteRef();
+          deletedDocsCopyOnWriteRef.incRef();
+          clone.deletedDocsCopyOnWriteRef = deletedDocsCopyOnWriteRef;
+          clone.deletedDocsCopyOnWriteRef.incRef();
+        }
+        Iterator iterator = clone.norms.entrySet().iterator();
+        while (iterator.hasNext()) {
+          Map.Entry entry = (Map.Entry)iterator.next();
+          String field = (String)entry.getKey();
+          Norm norm = (Norm)entry.getValue();
+          if (norm.copyOnWriteRef == null) {
+            norm.copyOnWriteRef = new CopyOnWriteRef();
+          }
+          norm.copyOnWriteRef.incRef();
+          Norm clonedNorm = norm.cloneRefBytes();
+          clonedNorm.copyOnWriteRef = norm.copyOnWriteRef;
+          clonedNorm.copyOnWriteRef.incRef();
+          clone.norms.put(field, clonedNorm); // replace norm object with cloneRefBytes
+        }
+      }
       success = true;
     } finally {
       if (this.referencedSegmentReader != null) {
@@ -553,10 +643,6 @@
     undeleteAll = false;
   }
 
-  FieldsReader getFieldsReader() {
-    return fieldsReader;
-  }
-
   protected void doClose() throws IOException {
     boolean hasReferencedReader = (referencedSegmentReader != null);
     
@@ -576,17 +662,14 @@
       singleNormStream = null;
     }
     
-    // re-opened SegmentReaders have their own instance of FieldsReader
-    if (fieldsReader != null) {
-      fieldsReader.close();
-    }
-
     if (!hasReferencedReader) { 
       // close everything, nothing is shared anymore with other readers
       if (tis != null) {
         tis.close();
       }
-  
+      if (fieldsReaderCloneable != null) {
+        fieldsReaderCloneable.close();
+      }
       if (freqStream != null)
         freqStream.close();
       if (proxStream != null)
@@ -625,6 +708,16 @@
   }
 
   protected void doDelete(int docNum) {
+    if (deletedDocsCopyOnWriteRef != null && deletedDocs != null) {
+      assert deletedDocsCopyOnWriteRef.refCount() > 0;
+      if (deletedDocsCopyOnWriteRef.refCount() > 1) {
+        deletedDocs = cloneDeletedDocs(deletedDocs);
+        deletedDocsCopyOnWriteRef.decRef();
+      } else {
+        // just use the deletedDocs we have and remove this copyOnWriteRef
+        deletedDocsCopyOnWriteRef = null;
+      }
+    }
     if (deletedDocs == null)
       deletedDocs = new BitVector(maxDoc());
     deletedDocsDirty = true;
@@ -661,11 +754,12 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
     ensureOpen();
     if (isDeleted(n))
       throw new IllegalArgumentException
               ("attempt to access a deleted document");
+    FieldsReader fieldsReader = getFieldsReader();
     return fieldsReader.doc(n, fieldSelector);
   }
 
@@ -808,7 +902,18 @@
     Norm norm = (Norm) norms.get(field);
     if (norm == null)                             // not an indexed field
       return;
-
+    synchronized (norm) {
+      if (norm.copyOnWriteRef != null) {
+        assert norm.copyOnWriteRef.refCount() > 0;
+        if (norm.copyOnWriteRef.refCount() == 1) {
+          // use the bytes as is, no more references to them
+          norm.copyOnWriteRef = null;
+        } else if (norm.copyOnWriteRef.refCount() > 1) {
+          norm.copyBytes();
+          norm.copyOnWriteRef.decRef();
+        } 
+      }
+    }
     norm.dirty = true;                            // mark it dirty
     normsDirty = true;
 
@@ -910,16 +1015,48 @@
   }
 
   /**
+   * Create a clone from the initial FieldsReader and store it in the ThreadLocal.
+   * @return FieldsReader
+   */
+  FieldsReader getFieldsReader() {
+    return (FieldsReader) fieldsReaderLocal.get();
+  }
+  
+  public class FieldsReaderLocal extends ThreadLocal {
+    private FieldsReader fieldsReaderCloneable;
+    Directory storeDir;
+    String fieldsSegment;
+    
+    
+    public FieldsReaderLocal(Directory storeDir, String fieldsSegment, FieldsReader fieldsReaderCloneable) {
+      this.storeDir = storeDir;
+      this.fieldsSegment = fieldsSegment;
+      this.fieldsReaderCloneable = fieldsReaderCloneable;
+    }
+    
+    protected Object initialValue() {
+      return fieldsReaderCloneable.clone();
+    }
+  }
+  
+  public static class TermVectorsReaderLocal extends ThreadLocal {
+    private TermVectorsReader termVectorsReaderOrig;
+    
+    public TermVectorsReaderLocal(TermVectorsReader termVectorsReaderOrig) {
+      this.termVectorsReaderOrig = termVectorsReaderOrig;
+    }
+    
+    protected Object initialValue() {
+      return termVectorsReaderOrig.clone();
+    }
+  }
+  
+  /**
    * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.
    * @return TermVectorsReader
    */
   private TermVectorsReader getTermVectorsReader() {
-    TermVectorsReader tvReader = (TermVectorsReader)termVectorsLocal.get();
-    if (tvReader == null) {
-      tvReader = (TermVectorsReader)termVectorsReaderOrig.clone();
-      termVectorsLocal.set(tvReader);
-    }
-    return tvReader;
+    return (TermVectorsReader) termVectorsLocal.get();
   }
   
   /** Return a term frequency vector for the specified document and field. The
Index: java/org/apache/lucene/util/BitVector.java
===================================================================
--- java/org/apache/lucene/util/BitVector.java	(revision 674135)
+++ java/org/apache/lucene/util/BitVector.java	(working copy)
@@ -35,7 +35,7 @@
 
   @version $Id$
   */
-public final class BitVector {
+public final class BitVector implements Cloneable {
 
   private byte[] bits;
   private int size;
@@ -46,7 +46,27 @@
     size = n;
     bits = new byte[(size >> 3) + 1];
   }
-
+  
+  public BitVector(byte[] bits, int size, int count) {
+    this.bits = bits;
+    this.size = size;
+    this.count = count;
+  }
+  
+  public Object clone() {
+    BitVector clone = new BitVector(size);
+    clone.count = count;
+    if (bits != null) {
+      clone.bits = new byte[bits.length];
+      System.arraycopy(bits, 0, clone.bits, 0, bits.length);
+    }
+    return clone;
+  }
+  
+  public byte[] getBits() {
+    return bits;
+  }
+  
   /** Sets the value of <code>bit</code> to one. */
   public final void set(int bit) {
     if (bit >= size) {
Index: test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- test/org/apache/lucene/index/TestIndexReaderReopen.java	(revision 674477)
+++ test/org/apache/lucene/index/TestIndexReaderReopen.java	(working copy)
@@ -852,7 +852,7 @@
     }
   }
   
-  private static void createIndex(Directory dir, boolean multiSegment) throws IOException {
+  public static void createIndex(Directory dir, boolean multiSegment) throws IOException {
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     
     w.setMergePolicy(new LogDocMergePolicy());
