Index: lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java
===================================================================
--- lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java	(revision 1403281)
+++ lucene/spatial/src/java/org/apache/lucene/spatial/prefix/TermQueryPrefixTreeStrategy.java	(working copy)
@@ -55,11 +55,12 @@
     Shape shape = args.getShape();
     int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct));
     List<Node> cells = grid.getNodes(shape, detailLevel, false);
-    TermsFilter filter = new TermsFilter();
+    Term[] terms = new Term[cells.size()];
+    int i = 0;
     for (Node cell : cells) {
-      filter.addTerm(new Term(getFieldName(), cell.getTokenString()));
+      terms[i++] = new Term(getFieldName(), cell.getTokenString());
     }
-    return filter;
+    return new TermsFilter(terms);
   }
 
 }
Index: lucene/CHANGES.txt
===================================================================
--- lucene/CHANGES.txt	(revision 1403281)
+++ lucene/CHANGES.txt	(working copy)
@@ -86,6 +86,9 @@
 * LUCENE-4504: Fix broken sort comparator in ValueSource.getSortField,
   used when sorting by a function query.  (Tom Shally via Robert Muir)
 
+* LUCENE-4511: TermsFilter might return wrong results if a field is not 
+  indexed or doesn't exist in the index. (Simon Willnauer)
+
 Optimizations
 
 * LUCENE-4443: Lucene41PostingsFormat no longer writes unnecessary offsets 
Index: lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
===================================================================
--- lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java	(revision 1403281)
+++ lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java	(working copy)
@@ -14,6 +14,8 @@
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.List;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -49,7 +51,7 @@
     * @see org.apache.lucene.xmlparser.FilterBuilder#process(org.w3c.dom.Element)
     */
   public Filter getFilter(Element e) throws ParserException {
-    TermsFilter tf = new TermsFilter();
+    List<Term> terms = new ArrayList<Term>();
     String text = DOMUtils.getNonBlankTextOrFail(e);
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
 
@@ -62,7 +64,7 @@
       while (ts.incrementToken()) {
         termAtt.fillBytesRef();
         term = new Term(fieldName, BytesRef.deepCopyOf(bytes));
-        tf.addTerm(term);
+        terms.add(term);
       }
       ts.end();
       ts.close();
@@ -70,6 +72,6 @@
     catch (IOException ioe) {
       throw new RuntimeException("Error constructing terms from index:" + ioe);
     }
-    return tf;
+    return new TermsFilter(terms);
   }
 }
Index: lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
===================================================================
--- lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java	(revision 1403281)
+++ lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java	(working copy)
@@ -17,7 +17,11 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -27,6 +31,7 @@
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FixedBitSet;
@@ -35,20 +40,16 @@
 public class TermsFilterTest extends LuceneTestCase {
 
   public void testCachability() throws Exception {
-    TermsFilter a = new TermsFilter();
-    a.addTerm(new Term("field1", "a"));
-    a.addTerm(new Term("field1", "b"));
+    TermsFilter a = new TermsFilter(new Term("field1", "a"), new Term("field1", "b"));
     HashSet<Filter> cachedFilters = new HashSet<Filter>();
     cachedFilters.add(a);
-    TermsFilter b = new TermsFilter();
-    b.addTerm(new Term("field1", "a"));
-    b.addTerm(new Term("field1", "b"));
+    TermsFilter b = new TermsFilter(new Term("field1", "a"), new Term("field1", "b"));
 
     assertTrue("Must be cached", cachedFilters.contains(b));
-    b.addTerm(new Term("field1", "a")); //duplicate term
-    assertTrue("Must be cached", cachedFilters.contains(b));
-    b.addTerm(new Term("field1", "c"));
-    assertFalse("Must not be cached", cachedFilters.contains(b));
+    
+    //duplicate term
+    assertTrue("Must be cached", cachedFilters.contains(new TermsFilter(new Term("field1", "a"), new Term("field1", "a"), new Term("field1", "b"))));
+    assertFalse("Must not be cached", cachedFilters.contains(new TermsFilter(new Term("field1", "a"), new Term("field1", "a"), new Term("field1", "b"),  new Term("field1", "v"))));
   }
 
   public void testMissingTerms() throws Exception {
@@ -66,21 +67,21 @@
     AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
     w.close();
 
-    TermsFilter tf = new TermsFilter();
-    tf.addTerm(new Term(fieldName, "19"));
-    FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
-    assertEquals("Must match nothing", 0, bits.cardinality());
+    List<Term> terms = new ArrayList<Term>();
+    terms.add(new Term(fieldName, "19"));
+    FixedBitSet bits = (FixedBitSet) new TermsFilter(terms).getDocIdSet(context, context.reader().getLiveDocs());
+    assertNull("Must match nothing", bits);
 
-    tf.addTerm(new Term(fieldName, "20"));
-    bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
+    terms.add(new Term(fieldName, "20"));
+    bits = (FixedBitSet) new TermsFilter(terms).getDocIdSet(context, context.reader().getLiveDocs());
     assertEquals("Must match 1", 1, bits.cardinality());
 
-    tf.addTerm(new Term(fieldName, "10"));
-    bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
+    terms.add(new Term(fieldName, "10"));
+    bits = (FixedBitSet) new TermsFilter(terms).getDocIdSet(context, context.reader().getLiveDocs());
     assertEquals("Must match 2", 2, bits.cardinality());
 
-    tf.addTerm(new Term(fieldName, "00"));
-    bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
+    terms.add(new Term(fieldName, "00"));
+    bits = (FixedBitSet) new TermsFilter(terms).getDocIdSet(context, context.reader().getLiveDocs());
     assertEquals("Must match 2", 2, bits.cardinality());
 
     reader.close();
@@ -106,13 +107,16 @@
     IndexReader reader2 = w2.getReader();
     w2.close();
     
-    TermsFilter tf = new TermsFilter();
-    tf.addTerm(new Term(fieldName, "content1"));
-    
+    TermsFilter tf = new TermsFilter(new Term(fieldName, "content1"));
     MultiReader multi = new MultiReader(reader1, reader2);
     for (AtomicReaderContext context : multi.leaves()) {
-      FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
-      assertTrue("Must be >= 0", bits.cardinality() >= 0);      
+      DocIdSet docIdSet = tf.getDocIdSet(context, context.reader().getLiveDocs());
+      if (context.reader().docFreq(new Term(fieldName, "content1")) == 0) {
+        assertNull(docIdSet);
+      } else {
+        FixedBitSet bits = (FixedBitSet) docIdSet;
+        assertTrue("Must be >= 0", bits.cardinality() >= 0);      
+      }
     }
     multi.close();
     reader1.close();
@@ -120,5 +124,72 @@
     rd1.close();
     rd2.close();
   }
+  
+  public void testFieldNotPresent() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int num = atLeast(3);
+    int skip = random().nextInt(num);
+    List<Term> terms = new ArrayList<Term>();
+    for (int i = 0; i < num; i++) {
+      terms.add(new Term("field" + i, "content1"));
+      Document doc = new Document();
+      if (skip == i) {
+        continue;
+      }
+      doc.add(newStringField("field" + i, "content1", Field.Store.YES));
+      w.addDocument(doc);  
+    }
+    
+    w.forceMerge(1);
+    IndexReader reader = w.getReader();
+    w.close();
+    assertEquals(1, reader.leaves().size());
+    
+    
+    
+    AtomicReaderContext context = reader.leaves().get(0);
+    TermsFilter tf = new TermsFilter(terms);
 
+    FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
+    assertEquals("Must be num fields - 1 since we skip only one field", num-1, bits.cardinality());  
+    reader.close();
+    dir.close();
+  }
+  
+  public void testSkipField() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+    int num = atLeast(10);
+    Set<Term> terms = new HashSet<Term>();
+    for (int i = 0; i < num; i++) {
+      String field = "field" + random().nextInt(100);
+      terms.add(new Term(field, "content1"));
+      Document doc = new Document();
+      doc.add(newStringField(field, "content1", Field.Store.YES));
+      w.addDocument(doc);
+    }
+    int randomFields = random().nextInt(10);
+    for (int i = 0; i < randomFields; i++) {
+      while (true) {
+        String field = "field" + random().nextInt(100);
+        Term t = new Term(field, "content1");
+        if (!terms.contains(t)) {
+          terms.add(t);
+          break;
+        }
+      }
+    }
+    w.forceMerge(1);
+    IndexReader reader = w.getReader();
+    w.close();
+    assertEquals(1, reader.leaves().size());
+    AtomicReaderContext context = reader.leaves().get(0);
+    TermsFilter tf = new TermsFilter(terms);
+
+    FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
+    assertEquals(context.reader().numDocs(), bits.cardinality());  
+    reader.close();
+    dir.close();
+  }
 }
Index: lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java
===================================================================
--- lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java	(revision 1403281)
+++ lucene/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java	(working copy)
@@ -82,10 +82,7 @@
   }
 
   private Filter getTermsFilter(String field, String text) {
-    TermsFilter tf = new TermsFilter();
-    tf.addTerm(new Term(field, text));
-
-    return tf;
+    return new TermsFilter(new Term(field, text));
   }
   
   private Filter getWrappedTermQuery(String field, String text) {
Index: lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
===================================================================
--- lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java	(revision 1403281)
+++ lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java	(working copy)
@@ -26,8 +26,8 @@
 import org.apache.lucene.util.FixedBitSet;
 
 import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
+import java.util.Arrays;
+import java.util.Collection;
 
 /**
  * Constructs a filter for docs matching any of the terms added to this class.
@@ -38,58 +38,103 @@
  */
 public class TermsFilter extends Filter {
 
-  private final Set<Term> terms = new TreeSet<Term>();
-
+  private final Term[] filterTerms;
+  private final boolean[] resetTermsEnum; // true if the TermsEnum must be re-obtained (field changed) when building the bitset
+  private final int length;
+  
   /**
-   * Adds a term to the list of acceptable terms
+   * Creates a new {@link TermsFilter} from the given collection. The collection
+   * can contain duplicate terms and multiple fields.
    */
-  public void addTerm(Term term) {
-    terms.add(term);
+  public TermsFilter(final Collection<Term> terms) {
+    this(terms.toArray(new Term[terms.size()]));
   }
-
-/* (non-Javadoc)
-   * @see org.apache.lucene.search.Filter#getDocIdSet(org.apache.lucene.index.IndexReader)
+  
+  /**
+   * Creates a new {@link TermsFilter} from the given array. The array can
+   * contain duplicate terms and multiple fields.
    */
+  public TermsFilter(final Term... terms) {
+    if (terms == null || terms.length == 0) {
+      throw new IllegalArgumentException("TermsFilter requires at least one term");
+    }
+    Arrays.sort(terms);
+    this.filterTerms = new Term[terms.length];
+    this.resetTermsEnum = new boolean[terms.length];
+    int index = 0;
+    for (int i = 0; i < terms.length; i++) {
+      Term currentTerm = terms[i];
+      boolean fieldChanged = true;
+      if (index > 0) {
+        // deduplicate
+        if (filterTerms[index-1].field().equals(currentTerm.field())) {
+          fieldChanged = false;
+          if (filterTerms[index-1].bytes().bytesEquals(currentTerm.bytes())){
+            continue;            
+          }
+        }
+      }
+      this.filterTerms[index] = currentTerm;
+      this.resetTermsEnum[index] = index == 0 || fieldChanged; // mark index 0 so we have a clear path in the iteration
+      index++;
+    }
+    assert resetTermsEnum[0];
+    length = index;
+  }
 
+  
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-    AtomicReader reader = context.reader();
-    FixedBitSet result = new FixedBitSet(reader.maxDoc());
-    Fields fields = reader.fields();
-
+    /*
+     * TODO: explore whether it is worth building the union of the terms into
+     * an automaton and calling intersect on the TermsEnum when density is high
+     */
+    final AtomicReader reader = context.reader();
+    FixedBitSet result = null;  // lazy init if needed - no need to create a big bitset ahead of time
+    final Fields fields = reader.fields();
     if (fields == null) {
       return result;
     }
-
-    BytesRef br = new BytesRef();
-    String lastField = null;
-    Terms termsC;
+    Terms terms = null;
     TermsEnum termsEnum = null;
     DocsEnum docs = null;
-    for (Term term : terms) {
-      if (!term.field().equals(lastField)) {
-        termsC = fields.terms(term.field());
-        if (termsC == null) {
-          return result;
+    for (int i = 0; i < length; i++) {
+      final Term term = this.filterTerms[i];
+      if (resetTermsEnum[i]) {
+        if ((terms = fields.terms(term.field())) == null) {
+          i = skipToNextField(i+1, length); // skip to the next field since this field is not indexed
+          continue;
         }
-        termsEnum = termsC.iterator(null);
-        lastField = term.field();
+        termsEnum = terms.iterator(termsEnum); // this won't return null
       }
-
-      if (terms != null) { // TODO this check doesn't make sense, decide which variable its supposed to be for
-        br.copyBytes(term.bytes());
-        assert termsEnum != null;
-        if (termsEnum.seekExact(br,true)) {
-          docs = termsEnum.docs(acceptDocs, docs, 0);
-          while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+      assert termsEnum != null;
+      if (termsEnum.seekExact(term.bytes(), false)) { // don't use the cache, we could easily pollute it here
+        docs = termsEnum.docs(acceptDocs, docs, 0); // no freq if we don't need them
+        if (result == null) {
+          if (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+            result = new FixedBitSet(reader.maxDoc());
+            // lazy init but don't do it in the hot loop since we could read many docs
             result.set(docs.docID());
           }
         }
+        while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+          result.set(docs.docID());
+        }
       }
     }
     return result;
   }
 
+  private final int skipToNextField(int index, int length) {
+    for (int i = index; i < length; i++) {
+      if (resetTermsEnum[i]) {
+        return i-1;
+      }
+    }
+    return length;
+  }
+    
+
   @Override
   public boolean equals(Object obj) {
     if (this == obj) {
@@ -98,19 +143,42 @@
     if ((obj == null) || (obj.getClass() != this.getClass())) {
       return false;
     }
-
     TermsFilter test = (TermsFilter) obj;
-    return (terms == test.terms ||
-        (terms != null && terms.equals(test.terms)));
+    if (filterTerms != test.filterTerms) {
+      if (length == test.length) {
+        for (int i = 0; i < length; i++) {
+          // can not be null!
+          if (!filterTerms[i].equals(test.filterTerms[i])) {
+            return false;
+          }
+        }
+      } else {
+        return false;
+      }
+    }
+    return true;
+    
   }
 
   @Override
   public int hashCode() {
     int hash = 9;
-    for (Term term : terms) {
-      hash = 31 * hash + term.hashCode();
+    for (int i = 0; i < length; i++) {
+      hash = 31 * hash + filterTerms[i].hashCode();
     }
     return hash;
   }
+  
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    for (int i = 0; i < length; i++) {
+      if (builder.length() > 0) {
+        builder.append(' ');
+      }
+      builder.append(filterTerms[i]);
+    }
+    return builder.toString();
+  }
 
 }
