Index: lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	(revision 1144523)
+++ lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	(working copy)
@@ -240,7 +240,7 @@
           delCount += applyQueryDeletes(packet.queriesIterable(), reader);
           segAllDeletes = reader.numDocs() == 0;
         } finally {
-          readerPool.release(reader);
+          readerPool.release(reader, IOContext.READ);
         }
         anyNewDeletes |= delCount > 0;
 
@@ -282,7 +282,7 @@
             delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
             segAllDeletes = reader.numDocs() == 0;
           } finally {
-            readerPool.release(reader);
+            readerPool.release(reader, IOContext.READ);
           }
           anyNewDeletes |= delCount > 0;
 
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision 1144523)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -416,18 +416,47 @@
    *  has been called on this instance). */
 
   class ReaderPool {
+    
+    class SegmentCacheKey {
+      final SegmentInfo si;
+      final IOContext.Context context;
+
+      public SegmentCacheKey(SegmentInfo segInfo, IOContext.Context context) {
+        this.si = segInfo;
+        this.context = context;
+      }
+
+      @Override
+      public int hashCode() {
+        return si.hashCode() + context.hashCode();
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (this == obj)
+          return true;
+        if (obj == null || getClass() != obj.getClass())
+          return false;
+        // Must compare both key components: every readerMap lookup in this
+        // patch constructs a fresh key, so value equality (not identity)
+        // is what makes get/containsKey/remove work at all.
+        SegmentCacheKey other = (SegmentCacheKey) obj;
+        return si.equals(other.si) && context == other.context;
+      }
+    }
 
-    private final Map<SegmentInfo,SegmentReader> readerMap = new HashMap<SegmentInfo,SegmentReader>();
+    private final Map<SegmentCacheKey,SegmentReader> readerMap = new HashMap<SegmentCacheKey,SegmentReader>();
 
     /** Forcefully clear changes for the specified segments.  This is called on successful merge. */
     synchronized void clear(List<SegmentInfo> infos) throws IOException {
       if (infos == null) {
-        for (Map.Entry<SegmentInfo,SegmentReader> ent: readerMap.entrySet()) {
+        for (Map.Entry<SegmentCacheKey,SegmentReader> ent: readerMap.entrySet()) {
           ent.getValue().hasChanges = false;
         }
       } else {
+        IOContext context;
         for (final SegmentInfo info: infos) {
-          final SegmentReader r = readerMap.get(info);
+          context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, false));
+          final SegmentReader r = readerMap.get(new SegmentCacheKey(info, context.context));
           if (r != null) {
             r.hasChanges = false;
           }
@@ -460,8 +489,8 @@
      * @param sr
      * @throws IOException
      */
-    public synchronized boolean release(SegmentReader sr) throws IOException {
-      return release(sr, false);
+    public synchronized boolean release(SegmentReader sr, IOContext context) throws IOException {
+      return release(sr, false, context);
     }
 
     /**
@@ -473,11 +502,13 @@
      * @param sr
      * @throws IOException
      */
-    public synchronized boolean release(SegmentReader sr, boolean drop) throws IOException {
+    public synchronized boolean release(SegmentReader sr, boolean drop, IOContext context) throws IOException {
 
-      final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
+      SegmentCacheKey cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), context.context);
+      
+      final boolean pooled = readerMap.containsKey(cacheKey);
 
-      assert !pooled || readerMap.get(sr.getSegmentInfo()) == sr;
+      assert !pooled || readerMap.get(cacheKey) == sr;
 
       // Drop caller's ref; for an external reader (not
       // pooled), this decRef will close it
@@ -502,7 +533,7 @@
 
         // We are the last ref to this reader; since we're
         // not pooling readers, we release it:
-        readerMap.remove(sr.getSegmentInfo());
+        readerMap.remove(cacheKey);
 
         return hasChanges;
       }
@@ -510,17 +541,18 @@
       return false;
     }
 
-    public synchronized void drop(List<SegmentInfo> infos) throws IOException {
+    public synchronized void drop(List<SegmentInfo> infos, IOContext context) throws IOException {
       for(SegmentInfo info : infos) {
-        drop(info);
+        drop(info, context);
       }
     }
 
-    public synchronized void drop(SegmentInfo info) throws IOException {
-      final SegmentReader sr = readerMap.get(info);
+    public synchronized void drop(SegmentInfo info, IOContext context) throws IOException {
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context.context);
+      final SegmentReader sr = readerMap.get(cacheKey);
       if (sr != null) {
         sr.hasChanges = false;
-        readerMap.remove(info);
+        readerMap.remove(cacheKey);
         sr.close();
       }
     }
@@ -532,10 +564,10 @@
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
 
-      Iterator<Map.Entry<SegmentInfo,SegmentReader>> iter = readerMap.entrySet().iterator();
+      Iterator<Map.Entry<SegmentCacheKey,SegmentReader>> iter = readerMap.entrySet().iterator();
       while (iter.hasNext()) {
 
-        Map.Entry<SegmentInfo,SegmentReader> ent = iter.next();
+        Map.Entry<SegmentCacheKey,SegmentReader> ent = iter.next();
 
         SegmentReader sr = ent.getValue();
         if (sr.hasChanges) {
@@ -567,10 +599,11 @@
       // We invoke deleter.checkpoint below, so we must be
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
-
+      // NOTE(review): readers are pooled per-IOContext.Context, but this path looks up only the FLUSH key; readers pooled under READ/MERGE/DEFAULT with pending changes would be missed — verify.
+      SegmentCacheKey cacheKey;
       for (SegmentInfo info : infos) {
-
-        final SegmentReader sr = readerMap.get(info);
+        cacheKey = new SegmentCacheKey(info, IOContext.Context.FLUSH);
+        final SegmentReader sr = readerMap.get(cacheKey);
         if (sr != null && sr.hasChanges) {
           assert infoIsLive(info);
           sr.doCommit(null);
@@ -626,8 +659,8 @@
       //      }
       
       // TODO: context should be part of the key used to cache that reader in the pool.
-
-      SegmentReader sr = readerMap.get(info);
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context.context);
+      SegmentReader sr = readerMap.get(cacheKey);
       if (sr == null) {
         // TODO: we may want to avoid doing this while
         // synchronized
@@ -637,7 +670,7 @@
 
         if (info.dir == directory) {
           // Only pool if reader is not external
-          readerMap.put(info, sr);
+          readerMap.put(cacheKey, sr);
         }
       } else {
         if (doOpenStores) {
@@ -663,8 +696,9 @@
     }
 
     // Returns a ref
-    public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
-      SegmentReader sr = readerMap.get(info);
+    public synchronized SegmentReader getIfExists(SegmentInfo info, IOContext context) throws IOException {
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context.context);
+      SegmentReader sr = readerMap.get(cacheKey);
       if (sr != null) {
         sr.incRef();
       }
@@ -678,7 +712,7 @@
    * delCount is returned.
    */
   public int numDeletedDocs(SegmentInfo info) throws IOException {
-    SegmentReader reader = readerPool.getIfExists(info);
+    SegmentReader reader = readerPool.getIfExists(info, IOContext.READ);
     try {
       if (reader != null) {
         return reader.numDeletedDocs();
@@ -687,7 +721,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, IOContext.READ);
       }
     }
   }
@@ -2863,7 +2897,7 @@
         if (!mergingSegments.contains(info)) {
           segmentInfos.remove(info);
           if (readerPool != null) {
-            readerPool.drop(info);
+            readerPool.drop(info, IOContext.DEFAULT);
           }
         }
       }
@@ -3038,7 +3072,8 @@
     segmentInfos.applyMergeChanges(merge, dropSegment);
     
     if (dropSegment) {
-      readerPool.drop(merge.info);
+      IOContext context = new IOContext(new MergeInfo(merge.totalDocCount, merge.estimatedMergeBytes, merge.isExternal, merge.optimize));
+      readerPool.drop(merge.info, context);
     }
     
     if (infoStream != null) {
@@ -3288,7 +3323,8 @@
         }
       }
       if (readerPool != null) {
-        readerPool.drop(result.allDeleted);
+        IOContext context = new IOContext(new MergeInfo(merge.totalDocCount, merge.estimatedMergeBytes, merge.isExternal, merge.optimize));
+        readerPool.drop(result.allDeleted, context);
       }
       checkpoint();
     }
@@ -3379,7 +3415,8 @@
     for (int i = 0; i < numSegments; i++) {
       if (merge.readers.get(i) != null) {
         try {
-          anyChanges |= readerPool.release(merge.readers.get(i), drop);
+          IOContext context = new IOContext(new MergeInfo(merge.totalDocCount, merge.estimatedMergeBytes, merge.isExternal, merge.optimize));
+          anyChanges |= readerPool.release(merge.readers.get(i), drop, context);
         } catch (Throwable t) {
           if (th == null) {
             th = t;
@@ -3592,7 +3629,7 @@
         }
       } finally {
         synchronized(this) {
-          if (readerPool.release(mergedReader)) {
+          if (readerPool.release(mergedReader, context)) {
             // Must checkpoint after releasing the
             // mergedReader since it may have written a new
             // deletes file:
@@ -3655,7 +3692,7 @@
   /** @lucene.internal */
   public synchronized String segString(SegmentInfo info) throws IOException {
     StringBuilder buffer = new StringBuilder();
-    SegmentReader reader = readerPool.getIfExists(info);
+    SegmentReader reader = readerPool.getIfExists(info, IOContext.DEFAULT);
     try {
       if (reader != null) {
         buffer.append(reader.toString());
@@ -3667,7 +3704,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, IOContext.DEFAULT);
       }
     }
     return buffer.toString();
Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentReader.java	(revision 1144523)
+++ lucene/src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -90,6 +90,11 @@
    */
   public static SegmentReader get(boolean readOnly, SegmentInfo si, int termInfosIndexDivisor, IOContext context) throws CorruptIndexException, IOException {
     // TODO should we check if readOnly and context combination makes sense like asserting that if read only we don't get a default?
+    if (readOnly) {
+      assert context != IOContext.DEFAULT;
+      // assert context.context == IOContext.Context.READ;
+      // The commented-out Context-based form above would accept both IOContext.READ and IOContext.READONCE.
+    }
     return get(readOnly, si.dir, si, true, termInfosIndexDivisor, context);
   }
 
