Index: solr/src/java/org/apache/solr/search/SolrIndexSearcher.java
===================================================================
--- solr/src/java/org/apache/solr/search/SolrIndexSearcher.java	(revision 1050839)
+++ solr/src/java/org/apache/solr/search/SolrIndexSearcher.java	(working copy)
@@ -27,6 +27,7 @@
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.ReaderUtil;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.core.SolrConfig;
@@ -71,6 +72,11 @@
   private final SolrIndexReader reader;
   private final boolean closeReader;
 
+  public final MultiFields multiFields;
+  public final ReaderUtil.Slice[] readerSubs;
+  public final Fields[] fieldSubs;
+  public final Bits[] deletedDocs;
+
   private final int queryResultWindowSize;
   private final int queryResultMaxDocsCached;
   private final boolean useFilterForSortedQuery;
@@ -121,7 +127,7 @@
   }
 
   /** Creates a searcher searching the provided index. */
-  public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, IndexReader r, boolean enableCache) {
+  public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, IndexReader r, boolean enableCache) throws IOException {
     this(core, schema,name,r, false, enableCache);
   }
 
@@ -137,7 +143,7 @@
     return sir;
   }
 
-  public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, IndexReader r, boolean closeReader, boolean enableCache) {
+  public SolrIndexSearcher(SolrCore core, IndexSchema schema, String name, IndexReader r, boolean closeReader, boolean enableCache) throws IOException {
     super(wrap(r));
     this.reader = (SolrIndexReader)super.getIndexReader();
     this.core = core;
@@ -147,6 +153,20 @@
 
     SolrIndexReader.setSearcher(reader, this);
 
+    // set up MultiFields
+    SolrIndexReader[] subReaders = reader.getLeafReaders();
+    readerSubs = new ReaderUtil.Slice[subReaders.length];
+    fieldSubs = new Fields[subReaders.length];
+    deletedDocs = new Bits[subReaders.length];
+
+    for (int i=0; i<subReaders.length; i++) {
+      SolrIndexReader subReader = subReaders[i];
+      readerSubs[i] = new ReaderUtil.Slice(subReader.getBase(), subReader.maxDoc(), i);
+      fieldSubs[i] = MultiFields.getFields(subReader); // hopefully segment level
+      deletedDocs[i] = MultiFields.getDeletedDocs(subReader); // hopefully segment level
+    }
+    multiFields = new MultiFields(fieldSubs, readerSubs);
+
     if (r.directory() instanceof FSDirectory) {
       FSDirectory fsDirectory = (FSDirectory) r.directory();
       indexDir = fsDirectory.getDirectory().getAbsolutePath();
@@ -493,6 +513,7 @@
    * @return the first document number containing the term
    */
   public int getFirstMatch(Term t) throws IOException {
+    // TODO: do this per-segment
     Fields fields = MultiFields.getFields(reader);
     if (fields == null) return -1;
     Terms terms = fields.terms(t.field());
Index: solr/src/java/org/apache/solr/request/SimpleFacets.java
===================================================================
--- solr/src/java/org/apache/solr/request/SimpleFacets.java	(revision 1050839)
+++ solr/src/java/org/apache/solr/request/SimpleFacets.java	(working copy)
@@ -628,8 +628,8 @@
       startTermBytes = new BytesRef(indexedPrefix);
     }
 
-    Fields fields = MultiFields.getFields(r);
-    Terms terms = fields==null ? null : fields.terms(field);
+    Fields fields = searcher.multiFields;
+    Terms terms = fields.terms(field);
     TermsEnum termsEnum = null;
     BytesRef term = null;
     if (terms != null) {
@@ -650,6 +650,9 @@
       }
     }
 
+    MultiTermsEnum multiTermsEnum = termsEnum instanceof MultiTermsEnum ? (MultiTermsEnum)termsEnum : null;
+    assert multiTermsEnum != null || term == null; // we should have a multiTermsEnum unless there were no matches
+    
     Term template = new Term(field);
     CharArr spare = new CharArr();
 
@@ -680,44 +683,23 @@
           } else {
             // iterate over TermDocs to calculate the intersection
             c=0;
-            final BulkPostingsEnum docsEnum = deState.bulkPostings = deState.termsEnum.bulkPostings(deState.bulkPostings, false, false);            
 
-            /*** do per-seg
-            // TODO: specialize when base docset is a bitset or hash set (skipDocs)?  or does it matter for this?
-            // TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl)
-            // TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet?
-            docsEnum = termsEnum.docs(null, docsEnum);
-
-            if (docsEnum instanceof MultiDocsEnum) {
-              MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs();
-              int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs();
-              for (int subindex = 0; subindex<numSubs; subindex++) {
-                MultiDocsEnum.EnumWithSlice sub = subs[subindex];
-                if (sub.docsEnum == null) continue;
-                DocsEnum.BulkReadResult bulk = sub.docsEnum.getBulkResult();
-                int base = sub.slice.start;
-                for (;;) {
-                  int nDocs = sub.docsEnum.read();
-                  if (nDocs == 0) break;
-                  int[] docArr = bulk.docs.ints;  // this might be movable outside the loop, but perhaps not worth the risk.
-                  int end = bulk.docs.offset + nDocs;
-                  for (int i=bulk.docs.offset; i<end; i++) {
-                    if (fastForRandomSet.exists(docArr[i]+base)) c++;
-                  }
-                }
-              }
-            } else
-            ***/
-            {
-              int docsLeft = df;
-              BulkPostingsEnum.BlockReader docDeltasReader = docsEnum.getDocDeltasReader();
+            MultiTermsEnum.TermsEnumWithSlice[] subMatches = multiTermsEnum.getMatchArray();
+            int nEnums = multiTermsEnum.getMatchCount();
+            for (int i=0; i<nEnums; i++) {
+              MultiTermsEnum.TermsEnumWithSlice match = subMatches[i];
+              BulkPostingsEnum bulkPostings = match.bulkPostings = match.terms.bulkPostings(match.bulkPostings, false, false);
+              BulkPostingsEnum.BlockReader docDeltasReader = bulkPostings.getDocDeltasReader();
+              
+              int docsLeft = match.terms.docFreq();
+              assert docsLeft > 0;
               int[] deltas = docDeltasReader.getBuffer();
               int docPointer = docDeltasReader.offset();
               int docPointerMax = docDeltasReader.end();
               // assert docPointer < docPointerMax;
               if (docPointerMax - docPointer > docsLeft) docPointerMax = docPointer + docsLeft;
               docsLeft -= docPointerMax - docPointer;
-              int doc = 0;
+              int doc = match.subSlice.start;
 
               for (;;) {
                 while (docPointer < docPointerMax) {
Index: lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java	(revision 1050839)
+++ lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java	(working copy)
@@ -441,11 +441,14 @@
     }
   }
 
-  private final static class TermsEnumWithSlice {
-    private final ReaderUtil.Slice subSlice;
-    private TermsEnum terms;
+  public final static class TermsEnumWithSlice {
+    public final ReaderUtil.Slice subSlice;
+    public TermsEnum terms;
     public BytesRef current;
 
+    /** initially null, settable by clients for reuse */
+    public BulkPostingsEnum bulkPostings;
+
     public TermsEnumWithSlice(ReaderUtil.Slice subSlice) {
       this.subSlice = subSlice;
       assert subSlice.length >= 0: "length=" + subSlice.length;
