Index: modules/join/src/java/org/apache/lucene/search/join/JoinQuery.java
===================================================================
--- modules/join/src/java/org/apache/lucene/search/join/JoinQuery.java	(revision )
+++ modules/join/src/java/org/apache/lucene/search/join/JoinQuery.java	(revision )
@@ -0,0 +1,330 @@
+package org.apache.lucene.search.join;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.*;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A query that encapsulates another query and joins / links documents that are related to the documents that have
+ * matched to the encapsulated query with the same "from" and "to" indexed field values. The "to" documents are
+ * returned as result.
+ * <p/>
+ * This query uses a top level approach for matching from and to documents by indexed field values. The downside of this
+ * is that in a sharded environment not all documents might get joined / linked.
+ */
+public class JoinQuery extends Query {
+
+  private final IndexSearcher toSearcher;
+  private final Query actualQuery;
+  private final Bits preComputedFromDocs;
+  private final String fromField;
+  private final String toField;
+
+  /**
+   * @param fromField           The field to join documents matching the actual query from.
+   * @param toField             The field to join documents that have matching terms with documents having the same
+   *                            fromField terms.
+   * @param actualQuery         The actual query executed to collect documents.
+   * @param toSearcher          Match documents with toField via another searcher. For example a searcher that reads
+   *                            from a different {@link org.apache.lucene.store.Directory}. Optional parameter.
+   * @param preComputedFromDocs The bitset for documents matching the actual query. If this is set the actual query
+   *                            isn't executed. Optional parameter.
+   */
+  public JoinQuery(String fromField, String toField, Query actualQuery, IndexSearcher toSearcher, Bits preComputedFromDocs) {
+    this.preComputedFromDocs = preComputedFromDocs;
+    this.fromField = fromField;
+    this.toField = toField;
+    this.actualQuery = actualQuery;
+    this.toSearcher = toSearcher;
+  }
+
+  @Override
+  public Query rewrite(IndexReader reader) throws IOException {
+    Query newQ = actualQuery.rewrite(reader);
+    if (newQ == actualQuery) {
+      return this;
+    } else {
+      return new JoinQuery(fromField, toField, newQ, toSearcher, preComputedFromDocs);
+    }
+  }
+
+  @Override
+  public void extractTerms(Set<Term> terms) {
+    actualQuery.extractTerms(terms);
+  }
+
+  public Weight createWeight(IndexSearcher fromSearcher) throws IOException {
+    if (preComputedFromDocs == null) {
+      TopLevelFixedBitSetCollector bitSetCollector = new TopLevelFixedBitSetCollector(fromSearcher.getIndexReader().maxDoc());
+      fromSearcher.search(actualQuery, bitSetCollector);
+      return new JoinQueryWeight(bitSetCollector.bitSet, fromSearcher, toSearcher);
+    } else {
+      return new JoinQueryWeight(preComputedFromDocs, fromSearcher, toSearcher);
+    }
+  }
+
+  @Override
+  public String toString(String field) {
+    // Pass 'field' through to the wrapped query and append the boost, per Query.toString convention.
+    return "{!join from=" + fromField + " to=" + toField + " toSearcher=" + toSearcher + "}" + actualQuery.toString(field) + ToStringUtils.boost(getBoost());
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    JoinQuery query = (JoinQuery) o;
+
+    return actualQuery.equals(query.actualQuery)
+        && fromField.equals(query.fromField)
+        && toField.equals(query.toField)
+        && getBoost() == query.getBoost()
+        && (preComputedFromDocs != null ? preComputedFromDocs.equals(query.preComputedFromDocs) : query.preComputedFromDocs == null)
+        && (toSearcher != null ? toSearcher.equals(query.toSearcher) : query.toSearcher == null);
+  }
+
+  @Override
+  public int hashCode() {
+    int result = 31 * actualQuery.hashCode() + Float.floatToIntBits(getBoost());
+    result = 31 * result + fromField.hashCode();
+    result = 31 * result + toField.hashCode();
+    result = 31 * result + (toSearcher != null ? toSearcher.hashCode() : 0);
+    result = 31 * result + (preComputedFromDocs != null ? preComputedFromDocs.hashCode() : 0);
+    return result;
+  }
+
+
+  protected class JoinQueryWeight extends Weight {
+
+    private float queryNorm;
+    private float queryWeight;
+    private final Bits fromDocs;
+    private final IndexSearcher fromSearcher;
+    private final IndexSearcher toSearcher;
+
+    private FixedBitSet joinResult;
+
+    public JoinQueryWeight(Bits fromDocs, IndexSearcher fromSearcher, IndexSearcher toSearcher) {
+      this.fromDocs = fromDocs;
+      this.fromSearcher = fromSearcher;
+      this.toSearcher = toSearcher == null ? fromSearcher : toSearcher;
+    }
+
+    @Override
+    public Explanation explain(IndexReader.AtomicReaderContext context, int doc) throws IOException {
+      Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+      boolean exists = scorer.advance(doc) == doc;
+
+      ComplexExplanation explanation = new ComplexExplanation();
+
+      if (exists) {
+        explanation.setDescription(this.toString() + " , product of:");
+        explanation.setValue(queryWeight);
+        explanation.setMatch(Boolean.TRUE);
+        explanation.addDetail(new Explanation(getBoost(), "boost"));
+        explanation.addDetail(new Explanation(queryNorm, "queryNorm"));
+      } else {
+        explanation.setDescription(this.toString() + " doesn't match id " + doc);
+        explanation.setValue(0);
+        explanation.setMatch(Boolean.FALSE);
+      }
+      return explanation;
+    }
+
+    @Override
+    public Query getQuery() {
+      return JoinQuery.this;
+    }
+
+    @Override
+    public float getValueForNormalization() throws IOException {
+      queryWeight = getBoost();
+      return queryWeight * queryWeight;
+    }
+
+    @Override
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
+      queryWeight *= this.queryNorm;
+    }
+
+    @Override
+    public Scorer scorer(IndexReader.AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+      // We only need to synchronize this if segments are searched concurrently.
+      if (joinResult == null) {
+        joinResult = join(fromSearcher.getIndexReader(), toSearcher.getIndexReader());
+      }
+
+
+      DocIdSetIterator iterator;
+      if (context.isTopLevel) {
+        iterator = joinResult.iterator();
+      } else {
+        iterator = new AdjustedDocIdSetIterator(context, joinResult.iterator());
+      }
+      return new JoinScorer(this, iterator, getBoost());
+    }
+
+    protected FixedBitSet join(IndexReader fromIr, IndexReader toIr) throws IOException {
+      FixedBitSet resultBitSet = new FixedBitSet(toIr.maxDoc());
+      Terms fromTerms = MultiFields.getTerms(fromIr, fromField);
+      Terms toTerms = MultiFields.getTerms(toIr, toField);
+      if (fromTerms == null || toTerms == null) return resultBitSet; // field absent on one side: empty join
+      Bits fromLiveDocs = MultiFields.getLiveDocs(fromIr);
+      Bits toLiveDocs = fromIr == toIr ? fromLiveDocs : MultiFields.getLiveDocs(toIr);
+      TermsEnum fromTermsEnum = fromTerms.iterator(null);
+      TermsEnum toTermsEnum = toTerms.iterator(null);
+      DocsEnum fromDocsEnum = null;
+      DocsEnum toDocsEnum = null;
+
+      for (BytesRef term = fromTermsEnum.next(); term != null; term = fromTermsEnum.next()) {
+        // Does any live "from" document carrying this term match the actual query?
+        boolean matches = false;
+        fromDocsEnum = fromTermsEnum.docs(fromLiveDocs, fromDocsEnum, false);
+        for (int doc = fromDocsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = fromDocsEnum.nextDoc()) {
+          if (fromDocs.get(doc)) {
+            matches = true;
+            break;
+          }
+        }
+
+        if (!matches) {
+          continue;
+        }
+
+        if (toTermsEnum.seekExact(term, true)) {
+          toDocsEnum = toTermsEnum.docs(toLiveDocs, toDocsEnum, false);
+          for (int doc = toDocsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = toDocsEnum.nextDoc()) {
+            resultBitSet.set(doc);
+          }
+        }
+      }
+      return resultBitSet;
+    }
+
+  }
+
+  protected static class JoinScorer extends Scorer {
+
+    final DocIdSetIterator iter;
+    final float score;
+
+    public JoinScorer(Weight w, DocIdSetIterator iter, float score) throws IOException {
+      super(w);
+      this.score = score;
+      this.iter = iter == null ? DocIdSet.EMPTY_DOCIDSET.iterator() : iter;
+    }
+
+    public int nextDoc() throws IOException {
+      return iter.nextDoc();
+    }
+
+    public int docID() {
+      return iter.docID();
+    }
+
+    public float score() throws IOException {
+      return score;
+    }
+
+    public int advance(int target) throws IOException {
+      return iter.advance(target);
+    }
+
+  }
+
+  private static class TopLevelFixedBitSetCollector extends Collector {
+
+    private final FixedBitSet bitSet;
+    private IndexReader.AtomicReaderContext context;
+
+    private TopLevelFixedBitSetCollector(int maxDoc) {
+      this.bitSet = new FixedBitSet(maxDoc);
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+      bitSet.set(doc + context.docBase);
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      this.context = context;
+    }
+
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return true;
+    }
+  }
+
+  private static class AdjustedDocIdSetIterator extends DocIdSetIterator {
+
+    private final int base;
+    private final int max;
+    private final DocIdSetIterator real;
+
+    private int adjustedDoc = -1;
+    private int realDoc = -1;
+
+    private AdjustedDocIdSetIterator(IndexReader.AtomicReaderContext context, DocIdSetIterator real) throws IOException {
+      this.base = context.docBase;
+      this.max = context.docBase + context.reader.maxDoc();
+      this.real = real;
+      if (base != 0) {
+        this.realDoc = real.advance(base);
+      }
+    }
+
+    @Override
+    public int docID() {
+      return adjustedDoc;
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      realDoc = realDoc == -1 ? real.nextDoc() : realDoc;
+      adjustedDoc = (realDoc >= base && realDoc < max) ? realDoc - base : NO_MORE_DOCS;
+      realDoc = -1;
+      return adjustedDoc;
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      if (target == NO_MORE_DOCS) {
+        return adjustedDoc = NO_MORE_DOCS;
+      }
+
+      realDoc = real.advance(target + base);
+      adjustedDoc = (realDoc >= base && realDoc < max) ? realDoc - base : NO_MORE_DOCS;
+      realDoc = -1;
+      return adjustedDoc;
+    }
+
+  }
+
+}
Index: modules/join/src/test/org/apache/lucene/search/join/TestJoinQuery.java
===================================================================
--- modules/join/src/test/org/apache/lucene/search/join/TestJoinQuery.java	(revision )
+++ modules/join/src/test/org/apache/lucene/search/join/TestJoinQuery.java	(revision )
@@ -0,0 +1,333 @@
+package org.apache.lucene.search.join;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+public class TestJoinQuery extends LuceneTestCase {
+
+  public void testSimple() throws Exception {
+    final String idField = "id";
+    final String toField = "productId";
+
+    Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(
+        random,
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT,
+            new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+
+    // 0
+    Document doc = new Document();
+    doc.add(new Field("description", "random text", TextField.TYPE_STORED));
+    doc.add(new Field("name", "name1", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "1", TextField.TYPE_STORED));
+    w.addDocument(doc);
+
+    // 1
+    doc = new Document();
+    doc.add(new Field("price", "10.0", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "2", TextField.TYPE_STORED));
+    doc.add(new Field(toField, "1", TextField.TYPE_STORED));
+    w.addDocument(doc);
+
+    // 2
+    doc = new Document();
+    doc.add(new Field("price", "20.0", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "3", TextField.TYPE_STORED));
+    doc.add(new Field(toField, "1", TextField.TYPE_STORED));
+    w.addDocument(doc);
+
+    // 3
+    doc = new Document();
+    doc.add(new Field("description", "more random text", TextField.TYPE_STORED));
+    doc.add(new Field("name", "name2", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "4", TextField.TYPE_STORED));
+    w.addDocument(doc);
+    w.commit();
+
+    // 4
+    doc = new Document();
+    doc.add(new Field("price", "10.0", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "5", TextField.TYPE_STORED));
+    doc.add(new Field(toField, "4", TextField.TYPE_STORED));
+    w.addDocument(doc);
+
+    // 5
+    doc = new Document();
+    doc.add(new Field("price", "20.0", TextField.TYPE_STORED));
+    doc.add(new Field(idField, "6", TextField.TYPE_STORED));
+    doc.add(new Field(toField, "4", TextField.TYPE_STORED));
+    w.addDocument(doc);
+
+    IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
+    w.close();
+
+    // Search for product
+    JoinQuery joinQuery = new JoinQuery(idField, toField, new TermQuery(new Term("name", "name2")), null, null);
+
+    TopDocs result = indexSearcher.search(joinQuery, 10);
+    assertEquals(2, result.totalHits);
+    assertEquals(4, result.scoreDocs[0].doc);
+    assertEquals(5, result.scoreDocs[1].doc);
+
+    joinQuery = new JoinQuery(idField, toField, new TermQuery(new Term("name", "name1")), null, null);
+    result = indexSearcher.search(joinQuery, 10);
+    assertEquals(2, result.totalHits);
+    assertEquals(1, result.scoreDocs[0].doc);
+    assertEquals(2, result.scoreDocs[1].doc);
+
+    // Search for offer
+    joinQuery = new JoinQuery(toField, idField, new TermQuery(new Term("id", "5")), null, null);
+    result = indexSearcher.search(joinQuery, 10);
+    assertEquals(1, result.totalHits);
+    assertEquals(3, result.scoreDocs[0].doc);
+
+    indexSearcher.getIndexReader().close();
+    dir.close();
+  }
+
+  @Test
+  public void testRandomJoin() throws Exception {
+    int maxIndexIter = _TestUtil.nextInt(random, 6, 12);
+    for (int indexIter = 1; indexIter <= maxIndexIter; indexIter++) {
+      if (VERBOSE) {
+        System.out.println("indexIter=" + indexIter);
+      }
+      Directory dir = newDirectory();
+      RandomIndexWriter w = new RandomIndexWriter(
+          random,
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())
+      );
+      int numberOfDocumentsToIndex = _TestUtil.nextInt(random, 87, 764);
+      IndexIterationContext context = createContext(numberOfDocumentsToIndex, w);
+
+      IndexReader topLevelReader = w.getReader();
+      w.close();
+      int maxSearchIter = _TestUtil.nextInt(random, 13, 26);
+      for (int searchIter = 1; searchIter <= maxSearchIter; searchIter++) {
+        if (VERBOSE) {
+          System.out.println("searchIter=" + searchIter);
+        }
+        IndexSearcher indexSearcher = newSearcher(topLevelReader);
+
+        int r = random.nextInt(context.randomUniqueValues.length);
+        boolean from = context.randomFrom[r];
+        String randomValue = context.randomUniqueValues[r];
+        FixedBitSet expectedResult = createExpectedResult(randomValue, from, indexSearcher.getIndexReader(), context);
+
+        Query actualQuery = new TermQuery(new Term("value", randomValue));
+        if (VERBOSE) {
+          System.out.println("actualQuery=" + actualQuery);
+        }
+        JoinQuery joinQuery;
+        if (from) {
+          joinQuery = new JoinQuery("from", "to", actualQuery, null, null);
+        } else {
+          joinQuery = new JoinQuery("to", "from", actualQuery, null, null);
+        }
+        if (VERBOSE) {
+          System.out.println("joinQuery=" + joinQuery);
+        }
+
+        // Need to know all documents that have matches. TopDocs doesn't give me that and then I'd be also testing TopDocsCollector...
+        final FixedBitSet actualResult = new FixedBitSet(indexSearcher.getIndexReader().maxDoc());
+        indexSearcher.search(joinQuery, new Collector() {
+          int docBase;
+
+          public void setScorer(Scorer scorer) throws IOException {
+          }
+
+          public void collect(int doc) throws IOException {
+            actualResult.set(doc + docBase);
+          }
+
+          public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+            docBase = context.docBase;
+          }
+
+          public boolean acceptsDocsOutOfOrder() {
+            return true;
+          }
+        });
+
+        if (VERBOSE) {
+          System.out.println("expected cardinality:" + expectedResult.cardinality());
+          DocIdSetIterator iterator = expectedResult.iterator();
+          for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+            System.out.println(String.format("Expected doc[%d] with id value %s", doc, indexSearcher.doc(doc).get("id")));
+          }
+          System.out.println("actual cardinality:" + actualResult.cardinality());
+          iterator = actualResult.iterator();
+          for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+            System.out.println(String.format("Actual doc[%d] with id value %s", doc, indexSearcher.doc(doc).get("id")));
+          }
+        }
+
+        assertEquals(expectedResult, actualResult);
+      }
+      topLevelReader.close();
+      dir.close();
+    }
+  }
+
+  private IndexIterationContext createContext(int nDocs, RandomIndexWriter writer) throws IOException {
+    return createContext(nDocs, writer, writer);
+  }
+
+  private IndexIterationContext createContext(int nDocs, RandomIndexWriter fromWriter, RandomIndexWriter toWriter) throws IOException {
+    IndexIterationContext context = new IndexIterationContext();
+    int numRandomValues = nDocs / 2;
+    context.randomUniqueValues = new String[numRandomValues];
+    Set<String> trackSet= new HashSet<String>();
+    context.randomFrom = new boolean[numRandomValues];
+    for (int i = 0; i < numRandomValues; i++) {
+      String uniqueRandomValue;
+      do {
+        uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(random);
+      } while ("".equals(uniqueRandomValue) || trackSet.contains(uniqueRandomValue));
+      // Generate unique values and empty strings aren't allowed.
+      trackSet.add(uniqueRandomValue);
+      context.randomFrom[i] = random.nextBoolean();
+      context.randomUniqueValues[i] = uniqueRandomValue;
+    }
+
+    for (int i = 0; i < nDocs; i++) {
+      String id = Integer.toString(i);
+      int randomI = random.nextInt(context.randomUniqueValues.length);
+      String value = context.randomUniqueValues[randomI];
+      String linkValue = context.randomUniqueValues[random.nextInt(context.randomUniqueValues.length)];
+      Document document = new Document();
+      document.add(newField(random, "id", id, TextField.TYPE_STORED));
+      document.add(newField(random, "value", value, TextField.TYPE_STORED));
+
+      final RandomIndexWriter w;
+      boolean from = context.randomFrom[randomI];
+      if (from) {
+        if (!context.fromDocuments.containsKey(linkValue)) {
+          context.fromDocuments.put(linkValue, new ArrayList<RandomDoc>());
+        }
+        if (!context.randomValueFromDocs.containsKey(value)) {
+          context.randomValueFromDocs.put(value, new ArrayList<RandomDoc>());
+        }
+
+        RandomDoc doc = new RandomDoc(id, linkValue, value);
+        context.fromDocuments.get(linkValue).add(doc);
+        context.randomValueFromDocs.get(value).add(doc);
+        document.add(newField(random, "from", linkValue, TextField.TYPE_STORED));
+        w = fromWriter;
+      } else {
+        if (!context.toDocuments.containsKey(linkValue)) {
+          context.toDocuments.put(linkValue, new ArrayList<RandomDoc>());
+        }
+        if (!context.randomValueToDocs.containsKey(value)) {
+          context.randomValueToDocs.put(value, new ArrayList<RandomDoc>());
+        }
+
+        RandomDoc doc = new RandomDoc(id, linkValue, value);
+        context.toDocuments.get(linkValue).add(doc);
+        context.randomValueToDocs.get(value).add(doc);
+        document.add(newField(random, "to", linkValue, TextField.TYPE_STORED));
+        w = toWriter;
+      }
+
+      w.addDocument(document);
+      if (random.nextInt(10) == 4) {
+        w.commit();
+      }
+      if (VERBOSE) {
+        System.out.println("Added document["+ i +"]: " + document);
+      }
+    }
+    return context;
+  }
+
+  private FixedBitSet createExpectedResult(String queryValue, boolean from, IndexReader topLevelReader, IndexIterationContext context) throws IOException {
+    final Map<String, List<RandomDoc>> randomValueDocs;
+    final Map<String, List<RandomDoc>> linkValueDocuments;
+    if (from) {
+      randomValueDocs = context.randomValueFromDocs;
+      linkValueDocuments = context.toDocuments;
+    } else {
+      randomValueDocs = context.randomValueToDocs;
+      linkValueDocuments = context.fromDocuments;
+    }
+
+    FixedBitSet expectedResult = new FixedBitSet(topLevelReader.maxDoc());
+    List<RandomDoc> matchingDocs = randomValueDocs.get(queryValue);
+    if (matchingDocs == null) {
+      return expectedResult; // no doc carries the query value: expected result is empty
+    }
+
+    for (RandomDoc matchingDoc : matchingDocs) {
+      List<RandomDoc> otherMatchingDocs = linkValueDocuments.get(matchingDoc.link);
+      if (otherMatchingDocs == null) {
+        continue;
+      }
+
+      for (RandomDoc otherSideDoc : otherMatchingDocs) {
+        DocsEnum docsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), false);
+        assert docsEnum != null;
+        int doc = docsEnum.nextDoc();
+        expectedResult.set(doc);
+      }
+    }
+    return expectedResult;
+  }
+
+  private static class IndexIterationContext {
+
+    String[] randomUniqueValues;
+    boolean[] randomFrom;
+    Map<String, List<RandomDoc>> fromDocuments = new HashMap<String, List<RandomDoc>>();
+    Map<String, List<RandomDoc>> toDocuments = new HashMap<String, List<RandomDoc>>();
+    Map<String, List<RandomDoc>> randomValueFromDocs = new HashMap<String, List<RandomDoc>>();
+    Map<String, List<RandomDoc>> randomValueToDocs = new HashMap<String, List<RandomDoc>>();
+
+  }
+
+  private static class RandomDoc {
+
+    final String id;
+    final String link;
+    final String value;
+
+    private RandomDoc(String id, String link, String value) {
+      this.id = id;
+      this.link = link;
+      this.value = value;
+    }
+  }
+
+}
