Index: lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	(revision 1147159)
+++ lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	(working copy)
@@ -240,7 +240,7 @@
           delCount += applyQueryDeletes(packet.queriesIterable(), reader);
           segAllDeletes = reader.numDocs() == 0;
         } finally {
-          readerPool.release(reader);
+          readerPool.release(reader, IOContext.Context.READ);
         }
         anyNewDeletes |= delCount > 0;
 
@@ -282,7 +282,7 @@
             delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
             segAllDeletes = reader.numDocs() == 0;
           } finally {
-            readerPool.release(reader);
+            readerPool.release(reader, IOContext.Context.READ);
           }
           anyNewDeletes |= delCount > 0;
 
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision 1147159)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -49,6 +49,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.MergeInfo;
@@ -416,18 +417,44 @@
    *  has been called on this instance). */
 
   class ReaderPool {
+    
+    final class SegmentCacheKey {
+      public final SegmentInfo si;
+      public final IOContext.Context context;
+
+      public SegmentCacheKey(SegmentInfo segInfo, IOContext.Context context) {
+        assert context == IOContext.Context.MERGE || context == IOContext.Context.READ;
+        this.si = segInfo;
+        this.context = context;
+      }
+
+      @Override
+      public int hashCode() {
+        return si.hashCode() + context.hashCode();
+      }
+
+      // Must override equals(Object), not overload it: HashMap calls
+      // equals(Object), so an overload would make all map lookups use
+      // identity equality and readerMap.get(new SegmentCacheKey(...)) would
+      // always miss.
+      @Override
+      public boolean equals(Object other) {
+        if (!(other instanceof SegmentCacheKey)) return false;
+        final SegmentCacheKey key = (SegmentCacheKey) other;
+        return si.equals(key.si) && context == key.context;
+      }
+    }
 
-    private final Map<SegmentInfo,SegmentReader> readerMap = new HashMap<SegmentInfo,SegmentReader>();
+    private final Map<SegmentCacheKey,SegmentReader> readerMap = new HashMap<SegmentCacheKey,SegmentReader>();
 
     /** Forcefully clear changes for the specified segments.  This is called on successful merge. */
     synchronized void clear(List<SegmentInfo> infos) throws IOException {
       if (infos == null) {
-        for (Map.Entry<SegmentInfo,SegmentReader> ent: readerMap.entrySet()) {
+        for (Map.Entry<SegmentCacheKey,SegmentReader> ent: readerMap.entrySet()) {
           ent.getValue().hasChanges = false;
         }
       } else {
         for (final SegmentInfo info: infos) {
-          final SegmentReader r = readerMap.get(info);
+          final SegmentReader r = readerMap.get(new SegmentCacheKey(info, IOContext.Context.MERGE));
           if (r != null) {
             r.hasChanges = false;
           }
@@ -460,8 +487,8 @@
      * @param sr
      * @throws IOException
      */
-    public synchronized boolean release(SegmentReader sr) throws IOException {
-      return release(sr, false);
+    public synchronized boolean release(SegmentReader sr, IOContext.Context context) throws IOException {
+      return release(sr, false, context);
     }
 
     /**
@@ -475,9 +502,17 @@
      */
     public synchronized boolean release(SegmentReader sr, boolean drop) throws IOException {
 
-      final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
+      SegmentCacheKey cacheKey;
+      cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), IOContext.Context.READ);      
+      
+      boolean pooled = readerMap.containsKey(cacheKey);
+      
+      if (readerMap.get(cacheKey) != sr) {
+        cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), IOContext.Context.MERGE);
+        pooled = readerMap.containsKey(cacheKey);
+      }
 
-      assert !pooled || readerMap.get(sr.getSegmentInfo()) == sr;
+      assert !pooled || readerMap.get(cacheKey) == sr;
 
       // Drop caller's ref; for an external reader (not
       // pooled), this decRef will close it
@@ -502,25 +537,74 @@
 
         // We are the last ref to this reader; since we're
         // not pooling readers, we release it:
-        readerMap.remove(sr.getSegmentInfo());
+        readerMap.remove(cacheKey);
 
         return hasChanges;
       }
 
       return false;
     }
+    
+    /**
+     * Release the segment reader (i.e. decRef it and close if there
+     * are no more references.
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
+     * @param sr
+     * @throws IOException
+     */
+    public synchronized boolean release(SegmentReader sr, boolean drop, IOContext.Context context) throws IOException {
 
-    public synchronized void drop(List<SegmentInfo> infos) throws IOException {
+      SegmentCacheKey cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), context);
+      
+      
+      final boolean pooled = readerMap.containsKey(cacheKey);
+
+      assert !pooled || readerMap.get(cacheKey) == sr;
+
+      // Drop caller's ref; for an external reader (not
+      // pooled), this decRef will close it
+      sr.decRef();
+
+      if (pooled && (drop || (!poolReaders && sr.getRefCount() == 1))) {
+
+        // We invoke deleter.checkpoint below, so we must be
+        // sync'd on IW if there are changes:
+        assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);
+
+        // Discard (don't save) changes when we are dropping
+        // the reader; this is used only on the sub-readers
+        // after a successful merge.
+        sr.hasChanges &= !drop;
+
+        final boolean hasChanges = sr.hasChanges;
+
+        // Drop our ref -- this will commit any pending
+        // changes to the dir
+        sr.close();
+
+        // We are the last ref to this reader; since we're
+        // not pooling readers, we release it:
+        readerMap.remove(cacheKey);
+
+        return hasChanges;
+      }
+
+      return false;
+    }
+
+    public synchronized void drop(List<SegmentInfo> infos, IOContext.Context context) throws IOException {
       for(SegmentInfo info : infos) {
-        drop(info);
+        drop(info, context);
       }
     }
 
-    public synchronized void drop(SegmentInfo info) throws IOException {
-      final SegmentReader sr = readerMap.get(info);
-      if (sr != null) {
+    public synchronized void drop(SegmentInfo info, IOContext.Context context) throws IOException {
+      final SegmentReader sr;
+      if ((sr = readerMap.remove(new SegmentCacheKey(info, context))) != null) {
         sr.hasChanges = false;
-        readerMap.remove(info);
+        // entry already removed from readerMap by the remove() in the condition above
         sr.close();
       }
     }
@@ -532,10 +616,10 @@
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
 
-      Iterator<Map.Entry<SegmentInfo,SegmentReader>> iter = readerMap.entrySet().iterator();
+      Iterator<Map.Entry<SegmentCacheKey,SegmentReader>> iter = readerMap.entrySet().iterator();
       while (iter.hasNext()) {
 
-        Map.Entry<SegmentInfo,SegmentReader> ent = iter.next();
+        Map.Entry<SegmentCacheKey,SegmentReader> ent = iter.next();
 
         SegmentReader sr = ent.getValue();
         if (sr.hasChanges) {
@@ -567,10 +651,9 @@
       // We invoke deleter.checkpoint below, so we must be
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
-
+      
       for (SegmentInfo info : infos) {
-
-        final SegmentReader sr = readerMap.get(info);
+        final SegmentReader sr = readerMap.get(new SegmentCacheKey(info, IOContext.Context.READ));
         if (sr != null && sr.hasChanges) {
           assert infoIsLive(info);
           sr.doCommit(null);
@@ -625,9 +708,8 @@
       //        readBufferSize = BufferedIndexInput.BUFFER_SIZE;
       //      }
       
-      // TODO: context should be part of the key used to cache that reader in the pool.
-
-      SegmentReader sr = readerMap.get(info);
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context.context);
+      SegmentReader sr = readerMap.get(cacheKey);
       if (sr == null) {
         // TODO: we may want to avoid doing this while
         // synchronized
@@ -637,7 +719,7 @@
 
         if (info.dir == directory) {
           // Only pool if reader is not external
-          readerMap.put(info, sr);
+          readerMap.put(cacheKey, sr);
         }
       } else {
         if (doOpenStores) {
@@ -664,13 +746,31 @@
 
     // Returns a ref
     public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
-      SegmentReader sr = readerMap.get(info);
+      SegmentReader sr;
+      
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, IOContext.Context.READ);      
+      sr = readerMap.get(cacheKey);
+      if (sr == null) {
+        cacheKey = new SegmentCacheKey(info, IOContext.Context.MERGE);
+        sr = readerMap.get(cacheKey);
+      }   
+            
       if (sr != null) {
         sr.incRef();
       }
       return sr;
     }
-  }
+    
+    // Returns a ref
+    public synchronized SegmentReader getIfExists(SegmentInfo info, IOContext.Context context) throws IOException {
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context);
+      SegmentReader sr = readerMap.get(cacheKey);
+      if (sr != null) {
+        sr.incRef();
+      }
+      return sr;
+    }
+  }  
 
   /**
    * Obtain the number of deleted docs for a pooled reader.
@@ -678,7 +778,7 @@
    * delCount is returned.
    */
   public int numDeletedDocs(SegmentInfo info) throws IOException {
-    SegmentReader reader = readerPool.getIfExists(info);
+    SegmentReader reader = readerPool.getIfExists(info, IOContext.Context.READ);
     try {
       if (reader != null) {
         return reader.numDeletedDocs();
@@ -687,7 +787,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, IOContext.Context.READ);
       }
     }
   }
@@ -2863,7 +2963,7 @@
         if (!mergingSegments.contains(info)) {
           segmentInfos.remove(info);
           if (readerPool != null) {
-            readerPool.drop(info);
+            readerPool.drop(info, IOContext.Context.MERGE);
           }
         }
       }
@@ -3038,7 +3138,7 @@
     segmentInfos.applyMergeChanges(merge, dropSegment);
     
     if (dropSegment) {
-      readerPool.drop(merge.info);
+      readerPool.drop(merge.info, IOContext.Context.MERGE);
     }
     
     if (infoStream != null) {
@@ -3288,7 +3388,7 @@
         }
       }
       if (readerPool != null) {
-        readerPool.drop(result.allDeleted);
+        readerPool.drop(result.allDeleted, IOContext.Context.MERGE);
       }
       checkpoint();
     }
@@ -3379,7 +3479,7 @@
     for (int i = 0; i < numSegments; i++) {
       if (merge.readers.get(i) != null) {
         try {
-          anyChanges |= readerPool.release(merge.readers.get(i), drop);
+          anyChanges |= readerPool.release(merge.readers.get(i), drop, IOContext.Context.MERGE);
         } catch (Throwable t) {
           if (th == null) {
             th = t;
@@ -3592,7 +3692,7 @@
         }
       } finally {
         synchronized(this) {
-          if (readerPool.release(mergedReader)) {
+          if (readerPool.release(mergedReader, context.context)) {
             // Must checkpoint after releasing the
             // mergedReader since it may have written a new
             // deletes file:
@@ -3655,7 +3755,7 @@
   /** @lucene.internal */
   public synchronized String segString(SegmentInfo info) throws IOException {
     StringBuilder buffer = new StringBuilder();
-    SegmentReader reader = readerPool.getIfExists(info);
+    SegmentReader reader = readerPool.getIfExists(info, IOContext.Context.READ);
     try {
       if (reader != null) {
         buffer.append(reader.toString());
@@ -3667,7 +3767,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, IOContext.Context.READ);
       }
     }
     return buffer.toString();
