Index: java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -112,23 +112,32 @@
       return (DirectoryIndexReader) finder.doBody(commit.getSegmentsFileName());
     }
   }
-
-  public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
+  
+  protected final synchronized IndexReader doReopenOrClone(final boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
 
-    if (this.hasChanges || this.isCurrent()) {
-      // this has changes, therefore we have the lock and don't need to reopen
-      // OR: the index in the directory hasn't changed - nothing to do here
-      return this;
+    if (doClone) {
+      if (hasChanges)
+        throw new IllegalStateException("cannot clone() a reader with uncommitted changes");
+    } else {
+      if (hasChanges || isCurrent())
+        // If we have changes we have the write lock and we
+        // are already "current", or the index in the
+        // directory hasn't changed - nothing to do here
+        return this;
     }
 
     return (DirectoryIndexReader) new SegmentInfos.FindSegmentsFile(directory) {
 
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
-        SegmentInfos infos = new SegmentInfos();
-        infos.read(directory, segmentFileName);
-
-        DirectoryIndexReader newReader = doReopen(infos);
+        SegmentInfos infos = null;
+        if (doClone) {
+          infos = (SegmentInfos) segmentInfos.clone();
+        } else {
+          infos = new SegmentInfos();
+          infos.read(directory, segmentFileName);
+        }
+        DirectoryIndexReader newReader = doReopenOrClone(infos, doClone);
         
         if (DirectoryIndexReader.this != newReader) {
           newReader.init(directory, infos, closeDirectory);
@@ -143,7 +152,7 @@
   /**
    * Re-opens the index using the passed-in SegmentInfos 
    */
-  protected abstract DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException;
+  protected abstract DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException;
   
   public void setDeletionPolicy(IndexDeletionPolicy deletionPolicy) {
     this.deletionPolicy = deletionPolicy;
Index: java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- java/org/apache/lucene/index/FieldsReader.java	(revision 674202)
+++ java/org/apache/lucene/index/FieldsReader.java	(working copy)
@@ -130,6 +130,34 @@
   }
 
   /**
+   * For use by clone
+   * @param fieldInfos
+   * @param cloneableFieldsStream
+   * @param fieldsStream
+   * @param indexStream
+   * @param numTotalDocs
+   * @param size
+   * @param format
+   * @param formatSize
+   */
+  private FieldsReader(FieldInfos fieldInfos, IndexInput cloneableFieldsStream, IndexInput fieldsStream, 
+      IndexInput indexStream, int numTotalDocs, int size, int format, int formatSize) {
+    this.fieldInfos = fieldInfos;
+    this.cloneableFieldsStream = cloneableFieldsStream;
+    this.fieldsStream = fieldsStream;
+    this.indexStream = indexStream;
+    this.numTotalDocs = numTotalDocs;
+    this.size = size;
+    this.format = format;
+    this.formatSize = formatSize;
+  }
+  
+  public Object clone() {
+    return new FieldsReader(fieldInfos, (IndexInput)cloneableFieldsStream.clone(), (IndexInput)cloneableFieldsStream.clone(), 
+                            (IndexInput)indexStream.clone(), numTotalDocs, size, format, formatSize);
+  }
+  
+  /**
    * @throws AlreadyClosedException if this FieldsReader is closed
    */
   protected final void ensureOpen() throws AlreadyClosedException {
Index: java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- java/org/apache/lucene/index/FilterIndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/FilterIndexReader.java	(working copy)
@@ -104,6 +104,10 @@
     this.in = in;
   }
 
+  public Object clone() {
+    return new FilterIndexReader((IndexReader) in.clone());
+  }
+
   public Directory directory() {
     return in.directory();
   }
Index: java/org/apache/lucene/index/IndexReader.java
===================================================================
--- java/org/apache/lucene/index/IndexReader.java	(revision 674202)
+++ java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -54,7 +54,7 @@
 
  @version $Id$
 */
-public abstract class IndexReader {
+public abstract class IndexReader implements Cloneable {
 
   /**
    * Constants describing field properties, for example used for
@@ -270,9 +270,21 @@
    * @throws IOException if there is a low-level IO error
    */  
   public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
-    throw new UnsupportedOperationException("This reader does not support reopen().");
+    return doReopenOrClone(false);
   }
+  
+  public Object clone() {
+    try {
+      return doReopenOrClone(true);
+    } catch (IOException ioe) {
+      throw new RuntimeException(ioe);
+    }
+  }
 
+  protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException {
+    throw new UnsupportedOperationException("This reader does not support reopen() or clone()");  
+  }
+
   /** 
    * Returns the directory associated with this index.  The Default 
    * implementation returns the directory specified by subclasses when 
Index: java/org/apache/lucene/index/MultiReader.java
===================================================================
--- java/org/apache/lucene/index/MultiReader.java	(revision 674202)
+++ java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -88,7 +88,7 @@
   }
 
   /**
-   * Tries to reopen the subreaders.
+   * Tries to reopen or clone the subreaders.
    * <br>
    * If one or more subreaders could be re-opened (i. e. subReader.reopen() 
    * returned a new instance != subReader), then a new MultiReader instance 
@@ -106,7 +106,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -116,7 +116,10 @@
     boolean success = false;
     try {
       for (int i = 0; i < subReaders.length; i++) {
-        newSubReaders[i] = subReaders[i].reopen();
+        if (doClone)
+          newSubReaders[i] = (IndexReader) subReaders[i].clone();
+        else 
+          newSubReaders[i] = subReaders[i].reopen();
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
         if (newSubReaders[i] != subReaders[i]) {
Index: java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- java/org/apache/lucene/index/MultiSegmentReader.java	(revision 674202)
+++ java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -70,7 +70,7 @@
   }
 
   /** This contructor is only used for {@link #reopen()} */
-  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException {
     super(directory, infos, closeDirectory);
     
     // we put the old SegmentReaders in a map, that allows us
@@ -108,7 +108,7 @@
           // this is a new reader; in case we hit an exception we can close it safely
           newReader = SegmentReader.get(infos.info(i));
         } else {
-          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i));
+          newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i), doClone);
         }
         if (newReader == newReaders[i]) {
           // this reader will be shared between the old and the new one,
@@ -193,14 +193,14 @@
     starts[subReaders.length] = maxDoc;
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
     if (infos.size() == 1) {
       // The index has only one segment now, so we can't refresh the MultiSegmentReader.
       // Return a new SegmentReader instead
       SegmentReader newReader = SegmentReader.get(infos, infos.info(0), false);
       return newReader;
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
+      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone);
     }            
   }
 
Index: java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- java/org/apache/lucene/index/ParallelReader.java	(revision 674202)
+++ java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -142,7 +142,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error 
    */
-  public IndexReader reopen() throws CorruptIndexException, IOException {
+  protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
     
     boolean reopened = false;
@@ -155,7 +155,11 @@
     
       for (int i = 0; i < readers.size(); i++) {
         IndexReader oldReader = (IndexReader) readers.get(i);
-        IndexReader newReader = oldReader.reopen();
+        final IndexReader newReader;
+        if (doClone)
+          newReader = (IndexReader) oldReader.clone();
+        else
+          newReader = oldReader.reopen();
         newReaders.add(newReader);
         // if at least one of the subreaders was updated we remember that
         // and return a new MultiReader
Index: java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- java/org/apache/lucene/index/SegmentReader.java	(revision 674202)
+++ java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -40,33 +40,35 @@
  * @version $Id$
  */
 class SegmentReader extends DirectoryIndexReader {
-  private String segment;
-  private SegmentInfo si;
-  private int readBufferSize;
+  protected String segment;
+  protected SegmentInfo si;
+  protected int readBufferSize;
 
   FieldInfos fieldInfos;
-  private FieldsReader fieldsReader;
+  protected FieldsReader fieldsReaderCloneable;
 
   TermInfosReader tis;
   TermVectorsReader termVectorsReaderOrig = null;
-  ThreadLocal termVectorsLocal = new ThreadLocal();
-
+  TermVectorsReaderLocal termVectorsLocal;
+  FieldsReaderLocal fieldsReaderLocal;
+    
   BitVector deletedDocs = null;
-  private boolean deletedDocsDirty = false;
-  private boolean normsDirty = false;
-  private boolean undeleteAll = false;
-  private int pendingDeleteCount;
+  CopyOnWriteRef deletedDocsCopyOnWriteRef;
+  protected boolean deletedDocsDirty = false;
+  protected boolean normsDirty = false;
+  protected boolean undeleteAll = false;
+  protected int pendingDeleteCount;
 
-  private boolean rollbackDeletedDocsDirty = false;
-  private boolean rollbackNormsDirty = false;
-  private boolean rollbackUndeleteAll = false;
-  private int rollbackPendingDeleteCount;
+  protected boolean rollbackDeletedDocsDirty = false;
+  protected boolean rollbackNormsDirty = false;
+  protected boolean rollbackUndeleteAll = false;
+  protected int rollbackPendingDeleteCount;
 
   IndexInput freqStream;
   IndexInput proxStream;
 
   // optionally used for the .nrm file shared by multiple norms
-  private IndexInput singleNormStream;
+  protected IndexInput singleNormStream;
 
   // Compound File Reader when based on a compound file segment
   CompoundFileReader cfsReader = null;
@@ -74,9 +76,31 @@
   
   // indicates the SegmentReader with which the resources are being shared,
   // in case this is a re-opened reader
-  private SegmentReader referencedSegmentReader = null;
+  protected SegmentReader referencedSegmentReader = null;
   
-  private class Norm {
+  protected class CopyOnWriteRef {
+    private int refCount = 0;
+    
+    public synchronized int refCount() {
+      return refCount;
+    }
+    
+    public synchronized void incRef() {
+      refCount++;
+    }
+    
+    public synchronized void decRef() {
+      refCount--;
+    }
+  }
+  
+  protected byte[] cloneNormBytes(byte[] bytes) {
+    byte[] cloneBytes = new byte[bytes.length];
+    System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
+    return cloneBytes;
+  }
+  
+  protected class Norm implements Cloneable {
     volatile int refCount;
     boolean useSingleNormStream;
     
@@ -94,6 +118,28 @@
 
     }
     
+    public void copyBytes() {
+      assert copyOnWriteRef != null;
+      if (bytes != null) {
+        byte[] cloneBytes = cloneNormBytes(bytes);
+        bytes = cloneBytes;
+      }
+    }
+    
+    public Norm cloneRefBytes() {
+      Norm clone = new Norm(in, useSingleNormStream, number, normSeek);
+      clone.bytes = bytes;
+      return clone;
+    }
+    
+    public Object clone() {
+      Norm clone = new Norm(in, useSingleNormStream, number, normSeek);
+      if (bytes != null) {
+        clone.bytes = cloneNormBytes(bytes);
+      }
+      return clone;
+    }
+    
     public Norm(IndexInput in, boolean useSingleNormStream, int number, long normSeek)
     {
       refCount = 1;
@@ -103,14 +149,15 @@
       this.useSingleNormStream = useSingleNormStream;
     }
 
-    private IndexInput in;
-    private byte[] bytes;
-    private boolean dirty;
-    private int number;
-    private long normSeek;
-    private boolean rollbackDirty;
+    protected IndexInput in;
+    protected byte[] bytes;
+    protected boolean dirty;
+    protected int number;
+    protected long normSeek;
+    protected boolean rollbackDirty;
+    protected CopyOnWriteRef copyOnWriteRef;
 
-    private void reWrite(SegmentInfo si) throws IOException {
+    protected void reWrite(SegmentInfo si) throws IOException {
       // NOTE: norms are re-written in regular directory, not cfs
       si.advanceNormGen(this.number);
       IndexOutput out = directory().createOutput(si.getNormFileName(this.number));
@@ -126,7 +173,7 @@
      * It is still valid to access all other norm properties after close is called.
      * @throws IOException
      */
-    private synchronized void close() throws IOException {
+    protected synchronized void close() throws IOException {
       if (in != null && !useSingleNormStream) {
         in.close();
       }
@@ -265,6 +312,10 @@
     return instance;
   }
 
+  /** So a subclass can do its own initializing. */
+  protected void doInitialize() {
+  }
+  
   private void initialize(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException {
     segment = si.name;
     this.si = si;
@@ -306,12 +357,12 @@
         fieldsSegment = segment;
 
       if (doOpenStores) {
-        fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
+        fieldsReaderCloneable = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
                                         si.getDocStoreOffset(), si.docCount);
-
+        fieldsReaderLocal = new FieldsReaderLocal(fieldsReaderCloneable);
         // Verify two sources of "maxDoc" agree:
-        if (si.getDocStoreOffset() == -1 && fieldsReader.size() != si.docCount) {
-          throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.size() + " but segmentInfo shows " + si.docCount);
+        if (si.getDocStoreOffset() == -1 && fieldsReaderCloneable.size() != si.docCount) {
+          throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReaderCloneable.size() + " but segmentInfo shows " + si.docCount);
         }
       }
 
@@ -332,7 +383,9 @@
         else
           vectorsSegment = segment;
         termVectorsReaderOrig = new TermVectorsReader(storeDir, vectorsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
+        termVectorsLocal = new TermVectorsReaderLocal(termVectorsReaderOrig);
       }
+      doInitialize();
       success = true;
     } finally {
 
@@ -364,26 +417,36 @@
       assert si.getDelCount() == 0;
   }
   
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
+
+    if (deletedDocsDirty || normsDirty) {
+      throw new IllegalStateException("Cannot clone IndexReader with pending changes deletedDocsDirty: "+deletedDocsDirty+" normsDirty: "+normsDirty);
+    }
+
     DirectoryIndexReader newReader;
     
     if (infos.size() == 1) {
       SegmentInfo si = infos.info(0);
       if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
-        newReader = reopenSegment(si);
+        newReader = reopenSegment(si, doClone);
       } else { 
         // segment not referenced anymore, reopen not possible
         // or segment format changed
         newReader = SegmentReader.get(infos, infos.info(0), false);
       }
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
+      return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone);
     }
     
     return newReader;
   }
   
-  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
+  protected BitVector cloneDeletedDocs(BitVector deletedDocs) {
+    if (deletedDocs == null) return null;
+    return (BitVector)deletedDocs.clone();
+  }
+  
+  synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException {
     boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) 
                                   && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
     boolean normsUpToDate = true;
@@ -399,13 +462,16 @@
       }
     }
 
-    if (normsUpToDate && deletionsUpToDate) {
+    if ((normsUpToDate && deletionsUpToDate) && !doClone) {
       return this;
     }    
     
-
-      // clone reader
-    SegmentReader clone = new SegmentReader();
+    SegmentReader clone;
+    try {
+      clone = (SegmentReader)IMPL.newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException("cannot load SegmentReader class: " + e, e);
+    }
     boolean success = false;
     try {
       clone.directory = directory;
@@ -414,13 +480,14 @@
       clone.readBufferSize = readBufferSize;
       clone.cfsReader = cfsReader;
       clone.storeCFSReader = storeCFSReader;
-  
+      clone.fieldsReaderCloneable = fieldsReaderCloneable;
       clone.fieldInfos = fieldInfos;
       clone.tis = tis;
       clone.freqStream = freqStream;
       clone.proxStream = proxStream;
       clone.termVectorsReaderOrig = termVectorsReaderOrig;
-  
+      clone.fieldsReaderLocal = fieldsReaderLocal;
+      clone.termVectorsLocal = termVectorsLocal;
       
       // we have to open a new FieldsReader, because it is not thread-safe
       // and can thus not be shared among multiple SegmentReaders
@@ -440,13 +507,7 @@
           storeDir = cfsReader;
         }
       }
-  
-      if (fieldsReader != null) {
-        clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
-                                        si.getDocStoreOffset(), si.docCount);
-      }
       
-      
       if (!deletionsUpToDate) {
         // load deleted docs
         clone.deletedDocs = null;
@@ -455,7 +516,7 @@
         clone.deletedDocs = this.deletedDocs;
       }
   
-      clone.norms = new HashMap();
+      //clone.norms = new HashMap(); already performed at variable declaration
       if (!normsUpToDate) {
         // load norms
         for (int i = 0; i < fieldNormsChanged.length; i++) {
@@ -495,8 +556,37 @@
             }
           }
         }  
-      }    
-  
+      } 
+      // Make norms copyOnWrite and create a new norm object so 
+      // there is no need to have synchronized norms map.
+      // There is a reference to the byte array.  If this reader
+      // or the cloned reader wants to write to the byte array they now 
+      // have to make a copy first.  If the this reader 
+      // did not make a copy on write then the updates would be seen
+      // by the cloned reader which is not desired behavior for a clone
+      // which should be a copy at the point clone was called
+      if (doClone) {
+        if (clone.deletedDocs != null) {
+          deletedDocsCopyOnWriteRef = new CopyOnWriteRef();
+          deletedDocsCopyOnWriteRef.incRef();
+          clone.deletedDocsCopyOnWriteRef = deletedDocsCopyOnWriteRef;
+          clone.deletedDocsCopyOnWriteRef.incRef();
+        }
+        Iterator iterator = clone.norms.entrySet().iterator();
+        while (iterator.hasNext()) {
+          Map.Entry entry = (Map.Entry)iterator.next();
+          String field = (String)entry.getKey();
+          Norm norm = (Norm)entry.getValue();
+          if (norm.copyOnWriteRef == null) {
+            norm.copyOnWriteRef = new CopyOnWriteRef();
+          }
+          norm.copyOnWriteRef.incRef();
+          Norm clonedNorm = norm.cloneRefBytes();
+          clonedNorm.copyOnWriteRef = norm.copyOnWriteRef;
+          clonedNorm.copyOnWriteRef.incRef();
+          clone.norms.put(field, clonedNorm); // replace norm object with cloneRefBytes
+        }
+      }
       success = true;
     } finally {
       if (this.referencedSegmentReader != null) {
@@ -553,10 +643,6 @@
     undeleteAll = false;
   }
 
-  FieldsReader getFieldsReader() {
-    return fieldsReader;
-  }
-
   protected void doClose() throws IOException {
     boolean hasReferencedReader = (referencedSegmentReader != null);
     
@@ -576,17 +662,14 @@
       singleNormStream = null;
     }
     
-    // re-opened SegmentReaders have their own instance of FieldsReader
-    if (fieldsReader != null) {
-      fieldsReader.close();
-    }
-
     if (!hasReferencedReader) { 
       // close everything, nothing is shared anymore with other readers
       if (tis != null) {
         tis.close();
       }
-  
+      if (fieldsReaderCloneable != null) {
+        fieldsReaderCloneable.close();
+      }
       if (freqStream != null)
         freqStream.close();
       if (proxStream != null)
@@ -625,6 +708,16 @@
   }
 
   protected void doDelete(int docNum) {
+    if (deletedDocsCopyOnWriteRef != null && deletedDocs != null) {
+      assert deletedDocsCopyOnWriteRef.refCount() > 0;
+      if (deletedDocsCopyOnWriteRef.refCount() > 1) {
+        deletedDocs = cloneDeletedDocs(deletedDocs);
+        deletedDocsCopyOnWriteRef.decRef();
+      } else {
+        // just use the deletedDocs we have and remove this copyOnWriteRef
+        deletedDocsCopyOnWriteRef = null;
+      }
+    }
     if (deletedDocs == null)
       deletedDocs = new BitVector(maxDoc());
     deletedDocsDirty = true;
@@ -661,11 +754,12 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
     ensureOpen();
     if (isDeleted(n))
       throw new IllegalArgumentException
               ("attempt to access a deleted document");
+    FieldsReader fieldsReader = getFieldsReader();
     return fieldsReader.doc(n, fieldSelector);
   }
 
@@ -808,7 +902,18 @@
     Norm norm = (Norm) norms.get(field);
     if (norm == null)                             // not an indexed field
       return;
-
+    synchronized (norm) {
+      if (norm.copyOnWriteRef != null) {
+        assert norm.copyOnWriteRef.refCount() > 0;
+        if (norm.copyOnWriteRef.refCount() == 1) {
+          // use the bytes as is, no more references to them
+          norm.copyOnWriteRef = null;
+        } else if (norm.copyOnWriteRef.refCount() > 1) {
+          norm.copyBytes();
+          norm.copyOnWriteRef.decRef();
+        } 
+      }
+    }
     norm.dirty = true;                            // mark it dirty
     normsDirty = true;
 
@@ -910,16 +1015,43 @@
   }
 
   /**
+   * Create a clone from the initial FieldsReader and store it in the ThreadLocal.
+   * @return FieldsReader
+   */
+  FieldsReader getFieldsReader() {
+    return (FieldsReader) fieldsReaderLocal.get();
+  }
+  
+  public static class FieldsReaderLocal extends ThreadLocal {
+    private FieldsReader fieldsReaderCloneable;
+    
+    public FieldsReaderLocal(FieldsReader fieldsReaderCloneable) {
+      this.fieldsReaderCloneable = fieldsReaderCloneable;
+    }
+    
+    protected Object initialValue() {
+      return fieldsReaderCloneable.clone();
+    }
+  }
+  
+  public static class TermVectorsReaderLocal extends ThreadLocal {
+    private TermVectorsReader termVectorsReaderOrig;
+    
+    public TermVectorsReaderLocal(TermVectorsReader termVectorsReaderOrig) {
+      this.termVectorsReaderOrig = termVectorsReaderOrig;
+    }
+    
+    protected Object initialValue() {
+      return termVectorsReaderOrig.clone();
+    }
+  }
+  
+  /**
    * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.
    * @return TermVectorsReader
    */
   private TermVectorsReader getTermVectorsReader() {
-    TermVectorsReader tvReader = (TermVectorsReader)termVectorsLocal.get();
-    if (tvReader == null) {
-      tvReader = (TermVectorsReader)termVectorsReaderOrig.clone();
-      termVectorsLocal.set(tvReader);
-    }
-    return tvReader;
+    return (TermVectorsReader) termVectorsLocal.get();
   }
   
   /** Return a term frequency vector for the specified document and field. The
Index: java/org/apache/lucene/util/BitVector.java
===================================================================
--- java/org/apache/lucene/util/BitVector.java	(revision 674135)
+++ java/org/apache/lucene/util/BitVector.java	(working copy)
@@ -35,7 +35,7 @@
 
   @version $Id$
   */
-public final class BitVector {
+public final class BitVector implements Cloneable {
 
   private byte[] bits;
   private int size;
@@ -46,7 +46,27 @@
     size = n;
     bits = new byte[(size >> 3) + 1];
   }
-
+  
+  public BitVector(byte[] bits, int size, int count) {
+    this.bits = bits;
+    this.size = size;
+    this.count = count;
+  }
+  
+  public Object clone() {
+    BitVector clone = new BitVector(size);
+    clone.count = count;
+    if (bits != null) {
+      clone.bits = new byte[bits.length];
+      System.arraycopy(bits, 0, clone.bits, 0, bits.length);
+    }
+    return clone;
+  }
+  
+  public byte[] getBits() {
+    return bits;
+  }
+  
   /** Sets the value of <code>bit</code> to one. */
   public final void set(int bit) {
     if (bit >= size) {
Index: test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
+++ test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 0)
@@ -0,0 +1,211 @@
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.lucene.index.SegmentReader.Norm;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BitVector;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexReaderClone extends LuceneTestCase {
+  static Random random = new Random(42); // fixed seed so failures are reproducible
+  
+  public void testNorms() throws Exception {
+    Map normChanges1 = new HashMap();
+    Map normChanges2 = new HashMap();
+    final Directory directory1 = new RAMDirectory();
+    directory1.setLockFactory(new NoLockFactory());
+    TestIndexReaderReopen.createIndex(directory1, false);
+    IndexReader index1 = IndexReader.open(directory1);
+    for (int x=0; x < 2; x++) {
+      changeNormsRandom(index1, "field1", normChanges1);
+    }
+    index1.flush();
+    IndexReader index2 = (IndexReader)index1.clone();
+    
+    TestIndexReader.assertIndexEquals(index1, index2);
+    
+    SegmentReader segmentReader1 = (SegmentReader)index1;
+    SegmentReader segmentReader2 = (SegmentReader)index2;
+    
+    assertNormChanges(segmentReader1, "field1", normChanges1, true);
+    assertNormChanges(segmentReader2, "field1", normChanges1, true);
+    
+    for (int x=0; x < 5; x++) {
+      changeNormsRandom(index2, "field1", normChanges2);
+    }
+    assertNormChanges(segmentReader2, "field1", normChanges2, true);
+    
+    changeNorms(segmentReader1, "field1", normChanges2.keySet());
+    assertNormChanges(segmentReader1, "field1", normChanges2, false);
+    // should be no CopyOnWriteRef in norms that were modified
+    // because both readers have made modifications to the norms
+    assertNormCopyOnWriteRef(segmentReader1, "field1", false);
+  }
+  
+  protected void changeNorms(IndexReader reader, String field, Set set) throws Exception {
+    byte[] norms = reader.norms(field);
+    Iterator i = set.iterator();
+    while (i.hasNext()) {
+      Integer doc = (Integer)i.next();
+      while (true) {
+        float normValue = random.nextFloat();
+        byte normByte = Similarity.encodeNorm(normValue);
+        if (norms[doc.intValue()] != normByte) {
+          reader.setNorm(doc.intValue(), field, normByte);
+          break;
+        } 
+      }
+    }
+  }
+  
+  protected void assertNormCopyOnWriteRef(SegmentReader segmentReader, String field, boolean copyOnWriteRef) {
+    Norm norm = (Norm)segmentReader.norms.get(field);
+    if (copyOnWriteRef) {
+      assertNotNull(norm.copyOnWriteRef);
+    } else {
+      assertNull(norm.copyOnWriteRef);
+    }
+  }
+  
+  protected void assertNormChanges(SegmentReader segmentReader, String field, Map normChanges, boolean isEqual) {
+    Norm norm = (Norm)segmentReader.norms.get(field);
+    Iterator iterator = normChanges.entrySet().iterator();
+    while (iterator.hasNext()) {
+      Map.Entry entry = (Map.Entry)iterator.next();
+      Integer doc = (Integer)entry.getKey();
+      Byte value = (Byte)entry.getValue();
+      if (isEqual) assertEquals(value.byteValue(), norm.bytes[doc.intValue()]);
+      else assertTrue(value.byteValue() != norm.bytes[doc.intValue()]); 
+    }
+  }
+  
+  protected void changeNormsRandom(IndexReader reader, String field, Map normChanges) throws Exception {
+    int doc = random.nextInt(reader.maxDoc());
+    if (!normChanges.containsKey(new Integer(doc))) {
+      float normValue = random.nextFloat();
+      byte normByte = Similarity.encodeNorm(normValue);
+      reader.setNorm(doc, field, normByte);
+      normChanges.put(new Integer(doc), new Byte(normByte));
+    } else {
+      changeNormsRandom(reader, field, normChanges);
+    }
+  }
+  
+  public void testDeletes() throws Exception {
+    final Directory directory1 = new RAMDirectory();
+    directory1.setLockFactory(new NoLockFactory());
+    TestIndexReaderReopen.createIndex(directory1, false);
+    Set deleted1 = new HashSet();
+    
+    IndexReader index1 = IndexReader.open(directory1);
+    for (int x=0; x < 3; x++) {
+      deleteRandom(index1, deleted1, null);
+    }
+    index1.flush();
+    IndexReader index2 = (IndexReader)index1.clone();
+    TestIndexReader.assertIndexEquals(index1, index2);
+    Set deleted2 = new HashSet();
+    for (int x=0; x < 2; x++) {
+      deleteRandom(index2, deleted2, deleted1);
+    }
+    
+    SegmentReader segmentReader1 = (SegmentReader)index1;
+    SegmentReader segmentReader2 = (SegmentReader)index2;
+    
+    assertEquals(segmentReader1.deletedDocsCopyOnWriteRef.refCount(), 1);
+    assertNull(segmentReader2.deletedDocsCopyOnWriteRef);
+    deleteRandom(index1, new HashSet(), deleted1);
+    assertNull(segmentReader1.deletedDocsCopyOnWriteRef);
+    
+    BitVector deletedDocs1 = segmentReader1.deletedDocs;
+    BitVector deletedDocs2 = segmentReader2.deletedDocs;
+    assertTrue(deletedDocs1 != deletedDocs2);
+    assertTrue(segmentReader1.deletedDocs == deletedDocs1);
+    
+    index2.flush();
+    
+    assertHasDeletedDocs(index1, deleted1);
+    assertHasDeletedDocs(index2, deleted1);
+    assertHasDeletedDocs(index2, deleted2);
+    assertHasNotDeletedDocs(index1, deleted2);
+    /**
+    IndexReader index3 = (IndexReader)index2.clone();
+    SegmentReader segmentReader3 = (SegmentReader)index3;
+    
+    
+    assertEquals(segmentReader1.deletedDocsCopyOnWriteRef.refCount(), 1);
+    deleteRandom(index1, new HashSet(), deleted1);
+    assertNull(segmentReader1.deletedDocsCopyOnWriteRef);
+    assertTrue(segmentReader1.deletedDocs == deletedDocs1);
+    **/
+  }
+  
+  public void assertHasNotDeletedDocs(IndexReader index, Set docs1) {
+    Iterator i = docs1.iterator();
+    while (i.hasNext()) {
+      Integer doc = (Integer)i.next();
+      assertFalse(index.isDeleted(doc.intValue()));
+    }
+  }
+  
+  public void assertHasDeletedDocs(IndexReader index, Set docs1) {
+    Iterator i = docs1.iterator();
+    while (i.hasNext()) {
+      Integer doc = (Integer)i.next();
+      assertTrue(index.isDeleted(doc.intValue()));
+    }
+  }
+  /**
+  public class RegularTest extends TestReopen {
+    Directory directory;
+    IndexReader reader;
+    
+    public RegularTest(Directory directory) {
+      this.directory = directory;
+    }
+
+    protected IndexReader openReader() throws IOException {
+      reader = IndexReader.open(directory);
+      return reader;
+    }
+    
+    protected void modifyIndex(int i) throws IOException {
+      TestIndexReaderClone.modifyIndex(i, reader);
+    }
+  }
+  **/
+  public static void deleteRandom(IndexReader reader, Set deleted, Set exclude) throws IOException {
+    int doc = random.nextInt(reader.maxDoc());
+    if (exclude == null) {
+      reader.deleteDocument(doc);
+      deleted.add(new Integer(doc));
+    } else if (!exclude.contains(new Integer(doc))) {
+      reader.deleteDocument(doc);
+      deleted.add(new Integer(doc));
+    } else {
+      deleteRandom(reader, deleted, exclude);
+    }
+  }
+  /**
+  public static void modifyIndex(int i, IndexReader reader) throws IOException {
+    if (i == 0) {
+      reader.deleteDocuments(new Term("field2", "a11"));
+      reader.deleteDocuments(new Term("field2", "b30"));
+    } else if (i == 1) {
+      reader.setNorm(4, "field1", 123);
+      reader.setNorm(44, "field2", 222);
+      reader.setNorm(44, "field4", 22);
+    } 
+  }
+  **/
+}
