Index: lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (revision 1238087)
+++ lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (working copy)
@@ -24,6 +24,8 @@
 import java.util.Set;
 
 import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.CompositeReader;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;
@@ -146,6 +148,9 @@
     insanity.addAll(checkValueMismatch(valIdToItems, 
                                        readerFieldToValIds, 
                                        valMismatchKeys));
+    insanity.addAll(checkSubreaders(valIdToItems, 
+                                    readerFieldToValIds));
+
     return insanity.toArray(new Insanity[insanity.size()]);
   }
 
@@ -186,7 +191,108 @@
     return insanity;
   }
 
+  /**
+   * Internal helper method used by check that iterates over
+   * the keys of readerFieldToValIds and generates a Collection
+   * of Insanity instances whenever two (or more) ReaderField instances are
+   * found that have an ancestry relationship.
+   *
+   * @see InsanityType#SUBREADER
+   */
+  private Collection<Insanity> checkSubreaders( MapOfSets<Integer, CacheEntry> valIdToItems,
+                                                MapOfSets<ReaderField, Integer> readerFieldToValIds) {
+
+    final List<Insanity> insanity = new ArrayList<Insanity>(23);
+
+    Map<ReaderField, Set<ReaderField>> badChildren =
+      new HashMap<ReaderField, Set<ReaderField>>(17);
+    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
+
+    Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
+    Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();
+
+    Set<ReaderField> seen = new HashSet<ReaderField>(17);
+
+    Set<ReaderField> readerFields = rfToValIdSets.keySet();
+    for (final ReaderField rf : readerFields) {
+
+      if (seen.contains(rf)) continue;
+
+      List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
+      for (Object kidKey : kids) {
+        ReaderField kid = new ReaderField(kidKey, rf.fieldName);
+
+        if (badChildren.containsKey(kid)) {
+          // we've already processed this kid as RF and found other problems
+          // track those problems as our own
+          badKids.put(rf, kid);
+          badKids.putAll(rf, badChildren.get(kid));
+          badChildren.remove(kid);
+
+        } else if (rfToValIdSets.containsKey(kid)) {
+          // we have cache entries for the kid
+          badKids.put(rf, kid);
+        }
+        seen.add(kid);
+      }
+      seen.add(rf);
+    }
+
+    // every mapping in badKids represents an Insanity
+    for (final ReaderField parent : badChildren.keySet()) {
+      Set<ReaderField> kids = badChildren.get(parent);
+
+      List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);
+
+      // put parent entr(ies) in first
+      {
+        for (final Integer value : rfToValIdSets.get(parent)) {
+          badEntries.addAll(viToItemSets.get(value));
+        }
+      }
+
+      // now the entries for the descendants
+      for (final ReaderField kid : kids) {
+        for (final Integer value : rfToValIdSets.get(kid)) {
+          badEntries.addAll(viToItemSets.get(value));
+        }
+      }
+
+      CacheEntry[] badness = new CacheEntry[badEntries.size()];
+      badness = badEntries.toArray(badness);
+
+      insanity.add(new Insanity(InsanityType.SUBREADER,
+                                "Found caches for descendants of " +
+                                parent.toString(),
+                                badness));
+    }
+
+    return insanity;
+
+  }
+  /**
+   * Checks if the seed is an IndexReader, and if so will walk
+   * the hierarchy of subReaders building up a list of the objects
+   * returned by obj.getCoreCacheKey()
+   */
+  private List<Object> getAllDescendantReaderKeys(Object seed) {
+    List<Object> all = new ArrayList<Object>(17); // will grow as we iter
+    all.add(seed);
+    for (int i = 0; i < all.size(); i++) {
+      Object obj = all.get(i);
+      if (obj instanceof CompositeReader) {
+        IndexReader[] subs = ((CompositeReader)obj).getSequentialSubReaders();
+        for (int j = 0; (null != subs) && (j < subs.length); j++) {
+          all.add(subs[j].getCoreCacheKey());
+        }
+      }
+
+    }
+    // need to skip the first, because it was the seed
+    return all.subList(1, all.size());
+  }
+
   /**
    * Simple pair object for using "readerKey + fieldName" a Map key
    */
   private final static class ReaderField {
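
A minimal consumer-side sketch (not part of the patch) of how the new SUBREADER check is used; it relies only on calls that appear elsewhere in this patch (checkSanity, getCacheEntries, getType, getCacheEntries on Insanity) plus Insanity.getMsg, and the class name is invented for the example:

import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.FieldCacheSanityChecker;
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;

// hypothetical example class, not part of the patch
public class SubreaderSanityExample {
  /** Reports every case where a reader and one of its descendants both hold
   *  FieldCache entries for the same field, i.e. the same values cached twice. */
  public static void reportDoubleCaching(FieldCache cache) {
    Insanity[] problems = FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
    for (Insanity insanity : problems) {
      if (insanity.getType() == InsanityType.SUBREADER) {
        System.err.println(insanity.getMsg());
        for (FieldCache.CacheEntry entry : insanity.getCacheEntries()) {
          System.err.println("  " + entry); // the offending cache entries
        }
      }
    }
  }
}
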
Index: lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java (revision 1238087)
+++ lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java (working copy)
@@ -116,7 +116,7 @@
         Assert.assertEquals("Wrap Reader " + i + ": " + query.toString(defaultFieldName),
                             correct, actual);
-        // TODO: FieldCache.DEFAULT.purge(s.getIndexReader()); // our wrapping can create insanity otherwise
+        QueryUtils.purgeFieldCache(s.getIndexReader()); // our wrapping can create insanity otherwise
       }
     }
 
Index: lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java (revision 1238087)
+++ lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java (working copy)
@@ -31,6 +31,7 @@
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.RAMDirectory;
@@ -116,14 +117,11 @@
         if (wrap) {
           IndexSearcher wrapped;
           check(random, q1, wrapped = wrapUnderlyingReader(random, s, -1), false);
-          // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
-          //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+          purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
           check(random, q1, wrapped = wrapUnderlyingReader(random, s, 0), false);
-          // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
-          //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+          purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
           check(random, q1, wrapped = wrapUnderlyingReader(random, s, +1), false);
-          // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
-          //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+          purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
         }
 
         checkExplanations(q1,s);
@@ -135,6 +133,11 @@
       throw new RuntimeException(e);
     }
   }
+
+  public static void purgeFieldCache(IndexReader r) throws IOException {
+    // this is just a hack, to get an atomic reader that contains all subreaders for insanity checks
+    FieldCache.DEFAULT.purge(SlowCompositeReaderWrapper.wrap(r));
+  }
 
   /**
    * Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
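
A short usage sketch for the new QueryUtils.purgeFieldCache helper (hypothetical helper class, not part of the patch), mirroring the pattern applied to the grouping tests below: run the wrapped-reader work, then purge in a finally block so the field-cache sanity assertions in tearDown are not tripped:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.QueryUtils;

// hypothetical test helper, not part of the patch
public class PurgeFieldCacheExample {
  /** Runs caller-supplied work against a (possibly wrapped) reader, then
   *  purges its FieldCache entries before tearDown's sanity checks run. */
  static void runAndCleanUp(IndexReader r, Runnable searchWork) throws IOException {
    try {
      searchWork.run(); // queries here may populate FieldCache via the wrapped reader
    } finally {
      QueryUtils.purgeFieldCache(r); // purges via SlowCompositeReaderWrapper.wrap(r), see above
    }
  }
}
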
Index: lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
===================================================================
--- lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision 1238087)
+++ lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (working copy)
@@ -138,4 +138,31 @@
     cache.purgeAllCaches();
   }
 
+  public void testInsanity2() throws IOException {
+    FieldCache cache = FieldCache.DEFAULT;
+    cache.purgeAllCaches();
+
+    cache.getTerms(readerA, "theString");
+    cache.getTerms(readerB, "theString");
+    cache.getTerms(readerX, "theString");
+
+    cache.getBytes(readerX, "theByte", false);
+
+
+    // // //
+
+    Insanity[] insanity =
+      FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+
+    assertEquals("wrong number of cache errors", 1, insanity.length);
+    assertEquals("wrong type of cache error",
+                 InsanityType.SUBREADER,
+                 insanity[0].getType());
+    assertEquals("wrong number of entries in cache error", 3,
+                 insanity[0].getCacheEntries().length);
+
+    // we expect bad things, don't let tearDown complain about them
+    cache.purgeAllCaches();
+  }
+
 }
Index: modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
===================================================================
--- modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java (revision 1238087)
+++ modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java (working copy)
@@ -366,7 +366,7 @@
           }
         }
       }
     } finally {
-      // TODO: FieldCache.DEFAULT.purge(r);
+      QueryUtils.purgeFieldCache(r);
     }
     r.close();
Index: modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
===================================================================
--- modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (revision 1238087)
+++ modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (working copy)
@@ -1140,9 +1140,9 @@
           assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
         }
       } finally {
-        // TODO: FieldCache.DEFAULT.purge(r);
+        QueryUtils.purgeFieldCache(r);
         if (rBlocks != null) {
-          // TODO: FieldCache.DEFAULT.purge(rBlocks);
+          QueryUtils.purgeFieldCache(rBlocks);
        }
      }
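
For illustration, a hedged sketch (not part of the patch; the class and method names are invented) of the double-caching scenario that the SUBREADER check and testInsanity2 above target: caching a field both on a composite reader as a whole and on its sub-readers. It uses only reader and FieldCache calls that appear elsewhere in this patch, and it is a sketch of the intent rather than a guaranteed reproduction, since what the checker flags depends on the cache keys the readers actually report:

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.FieldCache;

// hypothetical demonstration class, not part of the patch
public class DoubleCachingDemo {
  /** Caches the same field on the composite (via the slow wrapper) and on each
   *  atomic sub-reader; the sanity checker should then report SUBREADER insanity. */
  static void cacheTwice(CompositeReader composite, String field) throws IOException {
    AtomicReader whole = SlowCompositeReaderWrapper.wrap(composite);
    FieldCache.DEFAULT.getTerms(whole, field);             // entry keyed on the composite
    IndexReader[] subs = composite.getSequentialSubReaders();
    for (int i = 0; subs != null && i < subs.length; i++) {
      if (subs[i] instanceof AtomicReader) {
        FieldCache.DEFAULT.getTerms((AtomicReader) subs[i], field); // entries keyed on descendants
      }
    }
    // FieldCacheSanityChecker.checkSanity(FieldCache.DEFAULT.getCacheEntries())
    // is now expected to include an InsanityType.SUBREADER entry for this field
  }
}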