Index: solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java =================================================================== --- solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (revision 1245069) +++ solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (working copy) @@ -28,8 +28,6 @@ import org.apache.noggit.ObjectBuilder; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; -import org.apache.solr.common.SolrInputDocument; -import org.apache.solr.common.params.SolrParams; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.update.UpdateHandler; import org.apache.solr.update.UpdateLog; @@ -1673,7 +1671,7 @@ public int getFirstMatch(IndexReader r, Term t) throws IOException { - Fields fields = MultiFields.getFields(r); + InvertedFields fields = MultiInvertedFields.getFields(r); if (fields == null) return -1; Terms terms = fields.terms(t.field()); if (terms == null) return -1; @@ -1682,7 +1680,7 @@ if (!termsEnum.seekExact(termBytes, false)) { return -1; } - DocsEnum docs = termsEnum.docs(MultiFields.getLiveDocs(r), null, false); + DocsEnum docs = termsEnum.docs(MultiInvertedFields.getLiveDocs(r), null, false); int id = docs.nextDoc(); if (id != DocIdSetIterator.NO_MORE_DOCS) { int next = docs.nextDoc(); Index: solr/core/src/java/org/apache/solr/request/SimpleFacets.java =================================================================== --- solr/core/src/java/org/apache/solr/request/SimpleFacets.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/request/SimpleFacets.java (working copy) @@ -34,7 +34,6 @@ import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.StrUtils; -import org.apache.solr.core.SolrCore; import org.apache.solr.schema.*; import org.apache.solr.search.*; import org.apache.solr.util.BoundedTreeSet; @@ -627,7 +626,7 @@ startTermBytes = new BytesRef(indexedPrefix); } - Fields fields = r.fields(); + InvertedFields fields = r.fields(); Terms terms = fields==null ? null : fields.terms(field); TermsEnum termsEnum = null; SolrIndexSearcher.DocsEnumState deState = null; Index: solr/core/src/java/org/apache/solr/schema/IndexSchema.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/IndexSchema.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/schema/IndexSchema.java (working copy) @@ -356,7 +356,7 @@ XPathConstants.NODESET); typeLoader.load( loader, nodes ); - // load the Fields + // load the InvertedFields // Hang on to the fields that say if they are required -- this lets us set a reasonable default for the unique key Map explicitRequiredProp = new HashMap(); Index: solr/core/src/java/org/apache/solr/schema/FieldType.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/FieldType.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/schema/FieldType.java (working copy) @@ -265,7 +265,7 @@ /** * Create the field from native Lucene parts. 
Mostly intended for use by FieldTypes outputing multiple - * Fields per SchemaField + * InvertedFields per SchemaField * @param name The name of the field * @param val The _internal_ value to index * @param type {@link org.apache.lucene.document.FieldType} Index: solr/core/src/java/org/apache/solr/schema/ExternalFileField.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/ExternalFileField.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/schema/ExternalFileField.java (working copy) @@ -51,7 +51,7 @@ * This is to help support systems where one may not be able to overwrite a file (like Windows, if the file is in use). *

If the external file has already been loaded, and it is changed, those changes will not be visible until a commit has been done.
 * The external file may be sorted or unsorted by the key field, but it will be substantially slower (untested) if it isn't sorted.
- * Fields of this type may currently only be used as a ValueSource in a FunctionQuery.
+ *
InvertedFields of this type may currently only be used as a ValueSource in a FunctionQuery. * * */ Index: solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java (working copy) @@ -94,7 +94,7 @@ } public void inform(IndexSchema schema) { - //Can't do this until here b/c the Dynamic Fields are not initialized until here. + //Can't do this until here b/c the Dynamic InvertedFields are not initialized until here. if (subType != null) { SchemaField proto = registerPolyFieldDynamicPrototype(schema, subType); dynFieldProps = proto.getProperties(); Index: solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java =================================================================== --- solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.AtomicReaderContext; @@ -241,11 +241,11 @@ BytesRef internalKey = new BytesRef(); try { - TermsEnum termsEnum = MultiFields.getTerms(reader, idName).iterator(null); + TermsEnum termsEnum = MultiInvertedFields.getTerms(reader, idName).iterator(null); DocsEnum docsEnum = null; // removing deleted docs shouldn't matter - // final Bits liveDocs = MultiFields.getLiveDocs(reader); + // final Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); for (String line; (line=r.readLine())!=null;) { int delimIndex = line.lastIndexOf(delimiter); Index: solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java =================================================================== --- solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java (working copy) @@ -262,8 +262,8 @@ fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size()); } - Fields fromFields = fromSearcher.getAtomicReader().fields(); - Fields toFields = fromSearcher==toSearcher ? fromFields : toSearcher.getAtomicReader().fields(); + InvertedFields fromFields = fromSearcher.getAtomicReader().fields(); + InvertedFields toFields = fromSearcher==toSearcher ? fromFields : toSearcher.getAtomicReader().fields(); if (fromFields == null) return DocSet.EMPTY; Terms terms = fromFields.terms(fromField); Terms toTerms = toFields.terms(toField); Index: solr/core/src/java/org/apache/solr/search/DisMaxQParserPlugin.java =================================================================== --- solr/core/src/java/org/apache/solr/search/DisMaxQParserPlugin.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/search/DisMaxQParserPlugin.java (working copy) @@ -48,7 +48,7 @@ *

<li>tie - (Tie breaker) float value to use as tiebreaker in
 * DisjunctionMaxQueries (should be something much less than 1)
 * </li>
- * <li>qf - (Query Fields) fields and boosts to use when building
+ * <li>qf - (Query InvertedFields) fields and boosts to use when building
 * DisjunctionMaxQueries from the users query.
 * Format is:
 * "fieldA^1.0 fieldB^2.2".
 * This param can be specified multiple times, and the fields
@@ -58,7 +58,7 @@
 * complex expressions.
 * read {@link org.apache.solr.util.SolrPluginUtils#setMinShouldMatch SolrPluginUtils.setMinShouldMatch} and mm expression format for details.
 * </li>
- * <li>pf - (Phrase Fields) fields/boosts to make phrase queries out
+ *
  • pf - (Phrase InvertedFields) fields/boosts to make phrase queries out * of, to boost the users query for exact matches on the specified fields. * Format is: "fieldA^1.0 fieldB^2.2". * This param can be specified multiple times, and the fields Index: solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java =================================================================== --- solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java (working copy) @@ -38,7 +38,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.OpenBitSet; -import org.apache.lucene.util.ReaderUtil; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; @@ -569,7 +568,7 @@ * @return the first document number containing the term */ public int getFirstMatch(Term t) throws IOException { - Fields fields = atomicReader.fields(); + InvertedFields fields = atomicReader.fields(); if (fields == null) return -1; Terms terms = fields.terms(t.field()); if (terms == null) return -1; @@ -597,7 +596,7 @@ final AtomicReaderContext leaf = leaves[i]; final AtomicReader reader = leaf.reader(); - final Fields fields = reader.fields(); + final InvertedFields fields = reader.fields(); if (fields == null) continue; final Bits liveDocs = reader.getLiveDocs(); @@ -983,7 +982,7 @@ final AtomicReaderContext leaf = leaves[i]; final AtomicReader reader = leaf.reader(); collector.setNextReader(leaf); - Fields fields = reader.fields(); + InvertedFields fields = reader.fields(); Terms terms = fields.terms(t.field()); BytesRef termBytes = t.bytes(); Index: solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java =================================================================== --- solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java (working copy) @@ -777,7 +777,7 @@ * * @param docs The {@link org.apache.solr.search.DocList} to convert * @param searcher The {@link org.apache.solr.search.SolrIndexSearcher} to use to load the docs from the Lucene index - * @param fields The names of the Fields to load + * @param fields The names of the InvertedFields to load * @param ids A map to store the ids of the docs * @return The new {@link org.apache.solr.common.SolrDocumentList} containing all the loaded docs * @throws java.io.IOException if there was a problem loading the docs Index: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy) @@ -574,7 +574,7 @@ final CharsRef spare = new CharsRef(); - Fields fields = MultiFields.getFields(req.getSearcher().getIndexReader()); + InvertedFields fields = MultiInvertedFields.getFields(req.getSearcher().getIndexReader()); if (fields == null) { // No indexed fields return; Index: solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (revision 1245069) +++ 
solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (working copy) @@ -535,7 +535,7 @@ public FieldComparator setNextReader(AtomicReaderContext context) throws IOException { //convert the ids to Lucene doc ids, the ordSet and termValues needs to be the same size as the number of elevation docs we have ordSet.clear(); - Fields fields = context.reader().fields(); + InvertedFields fields = context.reader().fields(); if (fields == null) return this; Terms terms = fields.terms(fieldname); if (terms == null) return this; Index: solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java (working copy) @@ -10,7 +10,7 @@ import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.StoredFieldVisitor; @@ -251,7 +251,7 @@ } } else { // extract all fields - final Fields vectors = reader.getTermVectors(docId); + final InvertedFields vectors = reader.getTermVectors(docId); final FieldsEnum fieldsEnum = vectors.iterator(); String field; while((field = fieldsEnum.next()) != null) { Index: solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java (revision 1245069) +++ solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java (working copy) @@ -118,7 +118,7 @@ final AtomicReader indexReader = rb.req.getSearcher().getAtomicReader(); - Fields lfields = indexReader.fields(); + InvertedFields lfields = indexReader.fields(); for (String field : fields) { NamedList fieldTerms = new NamedList(); Index: solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java =================================================================== --- solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java (revision 1245069) +++ solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java (working copy) @@ -140,7 +140,7 @@ assertEquals( 400, getReturnCode( new Runnable() { public void run() { params.getFieldBool( "bad", "bool" ); } } ) ); assertEquals( 400, getReturnCode( new Runnable() { public void run() { params.getFieldInt( "bad", "int" ); } } ) ); - // Fields with default use their parent value: + // InvertedFields with default use their parent value: assertEquals( params.get( "aaaa", "str" ), required.get( "aaaa", "str" ) ); Index: solr/solrj/src/java/org/apache/solr/common/params/RequiredSolrParams.java =================================================================== --- solr/solrj/src/java/org/apache/solr/common/params/RequiredSolrParams.java (revision 1245069) +++ solr/solrj/src/java/org/apache/solr/common/params/RequiredSolrParams.java (working copy) @@ -23,7 +23,7 @@ /** * This is a simple wrapper to SolrParams that will throw a 400 - * exception if you ask for a parameter that does not exist. Fields + * exception if you ask for a parameter that does not exist. 
InvertedFields * specified with * * In short, any value you for from a RequiredSolrParams Index: solr/solrj/src/java/org/apache/solr/common/SolrDocument.java =================================================================== --- solr/solrj/src/java/org/apache/solr/common/SolrDocument.java (revision 1245069) +++ solr/solrj/src/java/org/apache/solr/common/SolrDocument.java (working copy) @@ -58,7 +58,7 @@ } /////////////////////////////////////////////////////////////////// - // Add / Set / Remove Fields + // Add / Set / Remove InvertedFields /////////////////////////////////////////////////////////////////// /** Index: solr/contrib/langid/src/test-files/langid/solr/conf/schema.xml =================================================================== --- solr/contrib/langid/src/test-files/langid/solr/conf/schema.xml (revision 1245069) +++ solr/contrib/langid/src/test-files/langid/solr/conf/schema.xml (working copy) @@ -68,7 +68,7 @@ - + Index: solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java =================================================================== --- solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java (revision 1245069) +++ solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java (working copy) @@ -554,7 +554,7 @@ fp.add("X-Mailer"); } - // Fields To Index + // InvertedFields To Index // single valued private static final String MESSAGE_ID = "messageId"; private static final String SUBJECT = "subject"; Index: modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java (revision 1245069) +++ modules/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java (working copy) @@ -26,7 +26,7 @@ import java.util.PriorityQueue; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.search.FuzzyTermsEnum; @@ -388,7 +388,7 @@ AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); - Terms terms = MultiFields.getTerms(ir, term.field()); + Terms terms = MultiInvertedFields.getTerms(ir, term.field()); if (terms == null) { return Collections.emptyList(); } Index: modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java (revision 1245069) +++ modules/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; @@ -62,7 +62,7 @@ HighFrequencyIterator() { try { - Terms terms = MultiFields.getTerms(reader, field); + Terms terms = MultiInvertedFields.getTerms(reader, field); if (terms != null) { termsEnum = 
terms.iterator(null); } Index: modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java (revision 1245069) +++ modules/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java (working copy) @@ -26,7 +26,7 @@ import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.UnicodeUtil; import org.apache.lucene.index.Terms; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import java.io.*; @@ -61,7 +61,7 @@ LuceneIterator() { try { - final Terms terms = MultiFields.getTerms(reader, field); + final Terms terms = MultiInvertedFields.getTerms(reader, field); if (terms != null) { termsEnum = terms.iterator(null); pendingTerm = termsEnum.next(); Index: modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java =================================================================== --- modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java (revision 1245069) +++ modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.util.BytesRef; @@ -49,7 +49,7 @@ MatchingTermVisitor mtv) throws IOException { /* check term presence in index here for symmetry with other SimpleTerm's */ - Terms terms = MultiFields.getTerms(reader, fieldName); + Terms terms = MultiInvertedFields.getTerms(reader, fieldName); if (terms != null) { TermsEnum termsEnum = terms.iterator(null); Index: modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java =================================================================== --- modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java (revision 1245069) +++ modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java (working copy) @@ -16,13 +16,13 @@ * limitations under the License. 
*/ +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import java.io.IOException; @@ -90,7 +90,7 @@ MatchingTermVisitor mtv) throws IOException { int prefixLength = prefix.length(); - Terms terms = MultiFields.getTerms(reader, fieldName); + Terms terms = MultiInvertedFields.getTerms(reader, fieldName); if (terms != null) { Matcher matcher = pattern.matcher(""); try { Index: modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java =================================================================== --- modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java (revision 1245069) +++ modules/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.util.StringHelper; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import java.io.IOException; @@ -59,7 +59,7 @@ MatchingTermVisitor mtv) throws IOException { /* inspired by PrefixQuery.rewrite(): */ - Terms terms = MultiFields.getTerms(reader, fieldName); + Terms terms = MultiInvertedFields.getTerms(reader, fieldName); if (terms != null) { TermsEnum termsEnum = terms.iterator(null); Index: modules/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java =================================================================== --- modules/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java (revision 1245069) +++ modules/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java (working copy) @@ -225,7 +225,7 @@ * * @param matchVersion Lucene version to match; this is passed through to QueryParser. * @param queries Queries strings to parse - * @param fields Fields to search on + * @param fields InvertedFields to search on * @param analyzer Analyzer to use * @throws ParseException if query parsing fails * @throws IllegalArgumentException if the length of the queries array differs @@ -273,7 +273,7 @@ * * @param matchVersion Lucene version to match; this is passed through to QueryParser. * @param query Query string to parse - * @param fields Fields to search on + * @param fields InvertedFields to search on * @param flags Flags describing the fields * @param analyzer Analyzer to use * @throws ParseException if query parsing fails @@ -321,7 +321,7 @@ * * @param matchVersion Lucene version to match; this is passed through to QueryParser. 
* @param queries Queries string to parse - * @param fields Fields to search on + * @param fields InvertedFields to search on * @param flags Flags describing the fields * @param analyzer Analyzer to use * @throws ParseException if query parsing fails Index: modules/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java =================================================================== --- modules/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java (revision 1245069) +++ modules/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/QueryParserUtil.java (working copy) @@ -43,7 +43,7 @@ * @param queries * Queries strings to parse * @param fields - * Fields to search on + * InvertedFields to search on * @param analyzer * Analyzer to use * @throws IllegalArgumentException @@ -97,7 +97,7 @@ * @param query * Query string to parse * @param fields - * Fields to search on + * InvertedFields to search on * @param flags * Flags describing the fields * @param analyzer @@ -154,7 +154,7 @@ * @param queries * Queries string to parse * @param fields - * Fields to search on + * InvertedFields to search on * @param flags * Flags describing the fields * @param analyzer Index: modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java =================================================================== --- modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java (revision 1245069) +++ modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -277,11 +277,11 @@ if (!handledTerms.add(baseTerm)) { continue; // already handled this term (for another list) } - Terms terms = MultiFields.getTerms(indexReader, baseTerm.field()); + Terms terms = MultiInvertedFields.getTerms(indexReader, baseTerm.field()); if (terms == null) { continue; } - Bits liveDocs = MultiFields.getLiveDocs(indexReader); + Bits liveDocs = MultiInvertedFields.getLiveDocs(indexReader); TermsEnum te = terms.iterator(null); DocsEnum de = null; while (te.next() != null) { Index: modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java =================================================================== --- modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java (revision 1245069) +++ modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig.OpenMode; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocIdSetIterator; @@ -89,7 +89,7 @@ // Obtain facets results and hand-test them assertCorrectResults(facetsCollector); - DocsEnum td = _TestUtil.docs(random, ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, false); + DocsEnum td = _TestUtil.docs(random, ir, "$facets", new BytesRef("$fulltree$"), 
MultiInvertedFields.getLiveDocs(ir), null, false); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); tr.close(); Index: modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java =================================================================== --- modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java (revision 1245069) +++ modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java (working copy) @@ -10,7 +10,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocIdSet; @@ -157,7 +157,7 @@ // now make sure the documents in the complement set are not deleted // and not in the original result set ScoredDocIDsIterator compIterator = complementSet.iterator(); - Bits live = MultiFields.getLiveDocs(reader); + Bits live = MultiInvertedFields.getLiveDocs(reader); while (compIterator.next()) { int docNum = compIterator.getDocID(); assertFalse( Index: modules/facet/src/java/org/apache/lucene/facet/search/sampling/TakmiSampleFixer.java =================================================================== --- modules/facet/src/java/org/apache/lucene/facet/search/sampling/TakmiSampleFixer.java (revision 1245069) +++ modules/facet/src/java/org/apache/lucene/facet/search/sampling/TakmiSampleFixer.java (working copy) @@ -3,7 +3,7 @@ import java.io.IOException; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.search.DocIdSetIterator; @@ -105,10 +105,10 @@ Term drillDownTerm = DrillDown.term(searchParams, catPath); // TODO (Facet): avoid Multi*? - Bits liveDocs = MultiFields.getLiveDocs(indexReader); - int updatedCount = countIntersection(MultiFields.getTermDocsEnum(indexReader, liveDocs, - drillDownTerm.field(), drillDownTerm.bytes(), - false), + Bits liveDocs = MultiInvertedFields.getLiveDocs(indexReader); + int updatedCount = countIntersection(MultiInvertedFields.getTermDocsEnum(indexReader, liveDocs, + drillDownTerm.field(), drillDownTerm.bytes(), + false), docIds.iterator()); fresNode.setValue(updatedCount); Index: modules/facet/src/java/org/apache/lucene/facet/search/PayloadIterator.java =================================================================== --- modules/facet/src/java/org/apache/lucene/facet/search/PayloadIterator.java (revision 1245069) +++ modules/facet/src/java/org/apache/lucene/facet/search/PayloadIterator.java (working copy) @@ -4,7 +4,7 @@ import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; @@ -52,8 +52,8 @@ throws IOException { this.buffer = buffer; // TODO (Facet): avoid Multi*? 
- Bits liveDocs = MultiFields.getLiveDocs(indexReader); - this.tp = MultiFields.getTermPositionsEnum(indexReader, liveDocs, term.field(), term.bytes(), false); + Bits liveDocs = MultiInvertedFields.getLiveDocs(indexReader); + this.tp = MultiInvertedFields.getTermPositionsEnum(indexReader, liveDocs, term.field(), term.bytes(), false); } /** Index: modules/facet/src/java/org/apache/lucene/facet/util/ScoredDocIdsUtils.java =================================================================== --- modules/facet/src/java/org/apache/lucene/facet/util/ScoredDocIdsUtils.java (revision 1245069) +++ modules/facet/src/java/org/apache/lucene/facet/util/ScoredDocIdsUtils.java (working copy) @@ -4,7 +4,7 @@ import java.util.Arrays; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; @@ -80,7 +80,7 @@ return; // return immediately } - Bits bits = MultiFields.getLiveDocs(reader); + Bits bits = MultiInvertedFields.getLiveDocs(reader); DocIdSetIterator it = set.iterator(); int doc = DocIdSetIterator.NO_MORE_DOCS; @@ -339,7 +339,7 @@ @Override public DocIdSetIterator iterator() throws IOException { return new DocIdSetIterator() { - final Bits liveDocs = MultiFields.getLiveDocs(reader); + final Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); private int next = -1; @Override Index: modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java =================================================================== --- modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java (revision 1245069) +++ modules/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.LogByteSizeMergePolicy; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.SegmentInfos; @@ -415,10 +415,10 @@ } // TODO (Facet): avoid Multi*? 
- Bits liveDocs = MultiFields.getLiveDocs(reader); - DocsEnum docs = MultiFields.getTermDocsEnum(reader, liveDocs, Consts.FULL, - new BytesRef(categoryPath.toString(delimiter)), - false); + Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); + DocsEnum docs = MultiInvertedFields.getTermDocsEnum(reader, liveDocs, Consts.FULL, + new BytesRef(categoryPath.toString(delimiter)), + false); if (docs == null || docs.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { return -1; // category does not exist in taxonomy } @@ -452,10 +452,10 @@ if (reader == null) { reader = openReader(); } - Bits liveDocs = MultiFields.getLiveDocs(reader); - DocsEnum docs = MultiFields.getTermDocsEnum(reader, liveDocs, Consts.FULL, - new BytesRef(categoryPath.toString(delimiter, prefixLen)), - false); + Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); + DocsEnum docs = MultiInvertedFields.getTermDocsEnum(reader, liveDocs, Consts.FULL, + new BytesRef(categoryPath.toString(delimiter, prefixLen)), + false); if (docs == null || docs.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { return -1; // category does not exist in taxonomy } @@ -767,7 +767,7 @@ } CategoryPath cp = new CategoryPath(); - Terms terms = MultiFields.getTerms(reader, Consts.FULL); + Terms terms = MultiInvertedFields.getTerms(reader, Consts.FULL); // The check is done here to avoid checking it on every iteration of the // below loop. A null term wlil be returned if there are no terms in the // lexicon, or after the Consts.FULL term. However while the loop is @@ -775,7 +775,7 @@ // terms. if (terms != null) { TermsEnum termsEnum = terms.iterator(null); - Bits liveDocs = MultiFields.getLiveDocs(reader); + Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); DocsEnum docsEnum = null; while (termsEnum.next() != null) { BytesRef t = termsEnum.term(); @@ -848,7 +848,7 @@ // of using the existing "reader" object: IndexReader mainreader = openReader(); // TODO (Facet): can this then go segment-by-segment and avoid MultiDocsEnum etc? - Terms terms = MultiFields.getTerms(mainreader, Consts.FULL); + Terms terms = MultiInvertedFields.getTerms(mainreader, Consts.FULL); assert terms != null; // TODO (Facet): explicit check / throw exception? 
TermsEnum mainte = terms.iterator(null); DocsEnum mainde = null; @@ -858,7 +858,7 @@ DocsEnum[] otherdocsEnum = new DocsEnum[taxonomies.length]; // just for reuse for (int i=0; i stopWords = new HashSet(); - Terms terms = MultiFields.getTerms(indexReader, field); + Terms terms = MultiInvertedFields.getTerms(indexReader, field); CharsRef spare = new CharsRef(); if (terms != null) { TermsEnum te = terms.iterator(null); Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 1245069) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (working copy) @@ -41,7 +41,7 @@ import org.apache.lucene.facet.taxonomy.TaxonomyReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; @@ -49,7 +49,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.index.LogMergePolicy; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.index.SlowCompositeReaderWrapper; @@ -483,7 +483,7 @@ int totalTokenCount2 = 0; - FieldsEnum fields = MultiFields.getFields(reader).iterator(); + FieldsEnum fields = MultiInvertedFields.getFields(reader).iterator(); String fieldName = null; while((fieldName = fields.next()) != null) { if (fieldName.equals(DocMaker.ID_FIELD) || fieldName.equals(DocMaker.DATE_MSEC_FIELD) || fieldName.equals(DocMaker.TIME_SEC_FIELD)) { @@ -496,7 +496,7 @@ TermsEnum termsEnum = terms.iterator(null); DocsEnum docs = null; while(termsEnum.next() != null) { - docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(reader), docs, true); + docs = _TestUtil.docs(random, termsEnum, MultiInvertedFields.getLiveDocs(reader), docs, true); while(docs.nextDoc() != docs.NO_MORE_DOCS) { totalTokenCount2 += docs.freq(); } @@ -749,7 +749,7 @@ writer.close(); Directory dir = benchmark.getRunData().getDirectory(); IndexReader reader = IndexReader.open(dir); - Fields tfv = reader.getTermVectors(0); + InvertedFields tfv = reader.getTermVectors(0); assertNotNull(tfv); assertTrue(tfv.getUniqueFieldCount() > 0); reader.close(); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java (revision 1245069) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java (working copy) @@ -20,9 +20,9 @@ import java.io.IOException; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.PriorityQueue; @@ -89,7 +89,7 @@ IndexReader ir = IndexReader.open(dir); try { int threshold = 
ir.maxDoc() / 10; // ignore words too common. - Terms terms = MultiFields.getTerms(ir, field); + Terms terms = MultiInvertedFields.getTerms(ir, field); if (terms != null) { TermsEnum termsEnum = terms.iterator(null); while (termsEnum.next() != null) { Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java (revision 1245069) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java (working copy) @@ -49,7 +49,7 @@ *
    "SearchVecHlgtSameRdr" SearchTravRetVectorHighlight(size[10],highlight[10],maxFrags[3],fields[body]) > : 1000
      * 
 *
- * Fields must be stored and term vector offsets and positions in order must be true for this task to work.
+ * InvertedFields must be stored and term vector offsets and positions in order must be true for this task to work.
 *
 *

Other side effects: counts additional 1 (record) for each traversed hit,
 * and 1 more for each retrieved (non null) document and 1 for each fragment returned.

    Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (revision 1245069) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (working copy) @@ -30,10 +30,9 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.search.Collector; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopScoreDocCollector; @@ -96,7 +95,7 @@ // optionally warm and add num docs traversed to count if (withWarm()) { Document doc = null; - Bits liveDocs = MultiFields.getLiveDocs(reader); + Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); for (int m = 0; m < reader.maxDoc(); m++) { if (null == liveDocs || liveDocs.get(m)) { doc = reader.document(m); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java (revision 1245069) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java (working copy) @@ -33,7 +33,7 @@ *

Note: This task reuses the reader if it is already open.
 * Otherwise a reader is opened at start and closed at the end.
 *
- * Takes optional param: comma separated list of Fields to load.
+ * Takes optional param: comma separated list of InvertedFields to load.
 *
 * Other side effects: counts additional 1 (record) for each traversed hit,
 * and 1 more for each retrieved (non null) document.

    Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java (revision 1245069) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java (working copy) @@ -69,7 +69,7 @@ public final static char SEP = '\t'; /** - * Fields to be written by default + * InvertedFields to be written by default */ public static final String[] DEFAULT_FIELDS = new String[] { DocMaker.TITLE_FIELD, Index: modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java =================================================================== --- modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java (revision 1245069) +++ modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java (working copy) @@ -320,7 +320,7 @@ } for (RandomDoc otherSideDoc : otherMatchingDocs) { - DocsEnum docsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), false); + DocsEnum docsEnum = MultiInvertedFields.getTermDocsEnum(topLevelReader, MultiInvertedFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), false); assert docsEnum != null; int doc = docsEnum.nextDoc(); expectedResult.set(doc); Index: modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/TermsFilter.java (working copy) @@ -56,7 +56,7 @@ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { AtomicReader reader = context.reader(); FixedBitSet result = new FixedBitSet(reader.maxDoc()); - Fields fields = reader.fields(); + InvertedFields fields = reader.fields(); if (fields == null) { return result; Index: modules/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.search.Scorer; import org.apache.lucene.util.Bits; @@ -38,7 +38,7 @@ this.maxDoc = reader.maxDoc(); this.values = values; setCheckDeletes(true); - this.liveDocs = MultiFields.getLiveDocs(reader); + this.liveDocs = MultiInvertedFields.getLiveDocs(reader); } public IndexReader getReader() { Index: modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java (working copy) @@ -20,7 +20,6 @@ import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.*; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.util.Bits; import java.io.IOException; Index: 
modules/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java (working copy) @@ -18,13 +18,12 @@ package org.apache.lucene.queries.function.valuesource; import org.apache.lucene.index.AtomicReaderContext; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.Terms; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.docvalues.LongDocValues; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.BytesRef; import java.io.IOException; import java.util.Map; @@ -58,7 +57,7 @@ public void createWeight(Map context, IndexSearcher searcher) throws IOException { long sumTotalTermFreq = 0; for (AtomicReaderContext readerContext : searcher.getTopReaderContext().leaves()) { - Fields fields = readerContext.reader().fields(); + InvertedFields fields = readerContext.reader().fields(); if (fields == null) continue; Terms terms = fields.terms(indexedField); if (terms == null) continue; Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java (working copy) @@ -38,7 +38,7 @@ @Override public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { - Fields fields = readerContext.reader().fields(); + InvertedFields fields = readerContext.reader().fields(); final Terms terms = fields.terms(field); return new IntDocValues(this) { Index: modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java (working copy) @@ -40,7 +40,7 @@ @Override public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { - Fields fields = readerContext.reader().fields(); + InvertedFields fields = readerContext.reader().fields(); final Terms terms = fields.terms(field); IndexSearcher searcher = (IndexSearcher)context.get("searcher"); final TFIDFSimilarity similarity = IDFValueSource.asTFIDF(searcher.getSimilarity(), field); Index: modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java =================================================================== --- modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (revision 1245069) +++ modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (working copy) @@ -22,10 +22,10 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.Document; -import org.apache.lucene.index.Fields; +import 
org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -570,7 +570,7 @@ public Query like(int docNum) throws IOException { if (fieldNames == null) { // gather list of valid fields from lucene - Collection fields = MultiFields.getIndexedFields(ir); + Collection fields = MultiInvertedFields.getIndexedFields(ir); fieldNames = fields.toArray(new String[fields.size()]); } @@ -705,7 +705,7 @@ public PriorityQueue retrieveTerms(int docNum) throws IOException { Map termFreqMap = new HashMap(); for (String fieldName : fieldNames) { - final Fields vectors = ir.getTermVectors(docNum); + final InvertedFields vectors = ir.getTermVectors(docNum); final Terms vector; if (vectors != null) { vector = vectors.terms(fieldName); Index: lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java =================================================================== --- lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy) @@ -25,7 +25,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; @@ -71,30 +71,30 @@ writer.addDocument(doc); IndexReader reader = writer.getReader(); - DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, - MultiFields.getLiveDocs(reader), - "preanalyzed", - new BytesRef("term1"), - false); + DocsAndPositionsEnum termPositions = MultiInvertedFields.getTermPositionsEnum(reader, + MultiInvertedFields.getLiveDocs(reader), + "preanalyzed", + new BytesRef("term1"), + false); assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); assertEquals(1, termPositions.freq()); assertEquals(0, termPositions.nextPosition()); - termPositions = MultiFields.getTermPositionsEnum(reader, - MultiFields.getLiveDocs(reader), - "preanalyzed", - new BytesRef("term2"), - false); + termPositions = MultiInvertedFields.getTermPositionsEnum(reader, + MultiInvertedFields.getLiveDocs(reader), + "preanalyzed", + new BytesRef("term2"), + false); assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); assertEquals(2, termPositions.freq()); assertEquals(1, termPositions.nextPosition()); assertEquals(3, termPositions.nextPosition()); - termPositions = MultiFields.getTermPositionsEnum(reader, - MultiFields.getLiveDocs(reader), - "preanalyzed", - new BytesRef("term3"), - false); + termPositions = MultiInvertedFields.getTermPositionsEnum(reader, + MultiInvertedFields.getLiveDocs(reader), + "preanalyzed", + new BytesRef("term3"), + false); assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); assertEquals(1, termPositions.freq()); assertEquals(2, termPositions.nextPosition()); Index: lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java =================================================================== --- lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java (revision 
1245069) +++ lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java (working copy) @@ -25,7 +25,6 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.pulsing.Pulsing40PostingsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -33,7 +32,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; @@ -81,7 +80,7 @@ IndexReader ir = iw.getReader(); iw.close(); - TermsEnum te = MultiFields.getTerms(ir, "field").iterator(null); + TermsEnum te = MultiInvertedFields.getTerms(ir, "field").iterator(null); DocsEnum de = null; for (int i = 0; i < 10050; i++) { @@ -139,7 +138,7 @@ IndexReader ir = iw.getReader(); iw.close(); - TermsEnum te = MultiFields.getTerms(ir, "field").iterator(null); + TermsEnum te = MultiInvertedFields.getTerms(ir, "field").iterator(null); DocsEnum de = null; for (int i = 0; i < 10050; i++) { Index: lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java =================================================================== --- lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (working copy) @@ -21,16 +21,15 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.codecs.appending.AppendingCodec; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum.SeekStatus; import org.apache.lucene.index.TermsEnum; @@ -128,7 +127,7 @@ assertEquals(2, reader.numDocs()); Document doc2 = reader.document(0); assertEquals(text, doc2.get("f")); - Fields fields = MultiFields.getFields(reader); + InvertedFields fields = MultiInvertedFields.getFields(reader); Terms terms = fields.terms("f"); assertNotNull(terms); TermsEnum te = terms.iterator(null); Index: lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestTermInfosReaderIndex.java =================================================================== --- lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestTermInfosReaderIndex.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestTermInfosReaderIndex.java (working copy) @@ -25,12 +25,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldInfosReader; -import org.apache.lucene.codecs.lucene3x.Lucene3xPostingsFormat; -import org.apache.lucene.codecs.lucene3x.PreFlexRWCodec; -import 
org.apache.lucene.codecs.lucene3x.SegmentTermEnum; -import org.apache.lucene.codecs.lucene3x.TermInfosReaderIndex; import org.apache.lucene.document.Document; import org.apache.lucene.document.StringField; import org.apache.lucene.index.CorruptIndexException; @@ -41,7 +36,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LogMergePolicy; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.Term; @@ -159,7 +154,7 @@ private static List sample(IndexReader reader, int size) throws IOException { List sample = new ArrayList(); Random random = new Random(); - FieldsEnum fieldsEnum = MultiFields.getFields(reader).iterator(); + FieldsEnum fieldsEnum = MultiInvertedFields.getFields(reader).iterator(); String field; while((field = fieldsEnum.next()) != null) { Terms terms = fieldsEnum.terms(); Index: lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java =================================================================== --- lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java (working copy) @@ -18,7 +18,6 @@ */ import org.apache.lucene.store.*; -import org.apache.lucene.codecs.lucene3x.PreFlexRWCodec; import org.apache.lucene.document.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; @@ -101,7 +100,7 @@ if (VERBOSE) { System.out.println("\nTEST: top now enum reader=" + reader); } - FieldsEnum fieldsEnum = MultiFields.getFields(reader).iterator(); + FieldsEnum fieldsEnum = MultiInvertedFields.getFields(reader).iterator(); { // Test straight enum: @@ -164,7 +163,7 @@ // seek to it TermsEnum te = tes.get(field); if (te == null) { - te = MultiFields.getTerms(reader, field).iterator(null); + te = MultiInvertedFields.getTerms(reader, field).iterator(null); tes.put(field, te); } @@ -229,7 +228,7 @@ // term does not exist: TermsEnum te = tes.get(field); if (te == null) { - te = MultiFields.getTerms(reader, field).iterator(null); + te = MultiInvertedFields.getTerms(reader, field).iterator(null); tes.put(field, te); } @@ -342,7 +341,7 @@ //SegmentInfo si = makePreFlexSegment(r, "_0", dir, fieldInfos, codec, fieldTerms); - //FieldsProducer fields = codec.fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 1024, 1)); + //InvertedInvertedFieldsProducer fields = codec.fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 1024, 1)); //assertNotNull(fields); doTestStraightEnum(fieldTerms, reader, uniqueTermCount); Index: lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java (working copy) @@ -96,7 +96,7 @@ assertEquals(100, hits.length); for (int i = 0; i < hits.length; i++) { - Fields vectors = searcher.reader.getTermVectors(hits[i].doc); + InvertedFields vectors = searcher.reader.getTermVectors(hits[i].doc); assertNotNull(vectors); assertEquals("doc=" + hits[i].doc + " tv=" + vectors, 1, vectors.getUniqueFieldCount()); } @@ -120,7 +120,7 @@ writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); - Fields v = reader.getTermVectors(0); 
+ InvertedFields v = reader.getTermVectors(0); assertEquals(4, v.getUniqueFieldCount()); String[] expectedFields = new String[]{"a", "b", "c", "x"}; int[] expectedPositions = new int[]{1, 2, 0}; @@ -164,7 +164,7 @@ DocsAndPositionsEnum dpEnum = null; for (int i = 0; i < hits.length; i++) { - Fields vectors = searcher.reader.getTermVectors(hits[i].doc); + InvertedFields vectors = searcher.reader.getTermVectors(hits[i].doc); assertNotNull(vectors); assertEquals(1, vectors.getUniqueFieldCount()); @@ -203,7 +203,7 @@ assertEquals(100, hits.length); for (int i = 0; i < hits.length; i++) { - Fields vectors = searcher.reader.getTermVectors(hits[i].doc); + InvertedFields vectors = searcher.reader.getTermVectors(hits[i].doc); assertNotNull(vectors); assertEquals(1, vectors.getUniqueFieldCount()); } @@ -251,7 +251,7 @@ writer.close(); IndexSearcher knownSearcher = newSearcher(reader); knownSearcher.setSimilarity(new DefaultSimilarity()); - FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator(); + FieldsEnum fields = MultiInvertedFields.getFields(knownSearcher.reader).iterator(); DocsEnum docs = null; while(fields.next() != null) { @@ -261,7 +261,7 @@ while (termsEnum.next() != null) { String text = termsEnum.term().utf8ToString(); - docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); + docs = _TestUtil.docs(random, termsEnum, MultiInvertedFields.getLiveDocs(knownSearcher.reader), docs, true); while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { int docId = docs.docID(); @@ -369,7 +369,7 @@ assertEquals(10, hits.length); for (int i = 0; i < hits.length; i++) { - Fields vectors = searcher.reader.getTermVectors(hits[i].doc); + InvertedFields vectors = searcher.reader.getTermVectors(hits[i].doc); assertNotNull(vectors); assertEquals(1, vectors.getUniqueFieldCount()); } @@ -416,7 +416,7 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - Fields vectors = searcher.reader.getTermVectors(hits[0].doc); + InvertedFields vectors = searcher.reader.getTermVectors(hits[0].doc); assertNotNull(vectors); assertEquals(1, vectors.getUniqueFieldCount()); Terms vector = vectors.terms("field"); Index: lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy) @@ -17,6 +17,7 @@ * limitations under the License. */ +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.document.Document; import org.apache.lucene.document.TextField; @@ -24,7 +25,6 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.util.BytesRef; import org.apache.lucene.store.Directory; @@ -73,7 +73,7 @@ // this TermEnum gives "piccadilly", "pie" and "pizza". 
String prefix = "pi"; - TermsEnum te = MultiFields.getFields(reader).terms("body").iterator(null); + TermsEnum te = MultiInvertedFields.getFields(reader).terms("body").iterator(null); te.seekCeil(new BytesRef(prefix)); do { String s = te.term().utf8ToString(); Index: lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SingleTermsEnum; import org.apache.lucene.index.Term; @@ -173,7 +173,7 @@ public void testRewriteSingleTerm() throws IOException { AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), BasicAutomata .makeString("piece")); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), FN); assertTrue(aq.getTermsEnum(terms) instanceof SingleTermsEnum); assertEquals(1, automatonQueryNrHits(aq)); } @@ -188,7 +188,7 @@ Automaton prefixAutomaton = BasicOperations.concatenate(pfx, BasicAutomata .makeAnyString()); AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), prefixAutomaton); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), FN); assertTrue(aq.getTermsEnum(terms) instanceof PrefixTermsEnum); assertEquals(3, automatonQueryNrHits(aq)); } @@ -201,7 +201,7 @@ .makeEmpty()); // not yet available: assertTrue(aq.getEnum(searcher.getIndexReader()) // instanceof EmptyTermEnum); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), FN); assertSame(TermsEnum.EMPTY, aq.getTermsEnum(terms)); assertEquals(0, automatonQueryNrHits(aq)); } Index: lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.*; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; @@ -163,7 +163,7 @@ long start = 0L; for (int docId = 0; docId < numDocs; docId++) { start = System.currentTimeMillis(); - Fields vectors = reader.getTermVectors(docId); + InvertedFields vectors = reader.getTermVectors(docId); timeElapsed += System.currentTimeMillis()-start; // verify vectors result @@ -177,7 +177,7 @@ } } - private void verifyVectors(Fields vectors, int num) throws IOException { + private void verifyVectors(InvertedFields vectors, int num) throws IOException { FieldsEnum fieldsEnum = vectors.iterator(); while(fieldsEnum.next() != null) { Terms terms = fieldsEnum.terms(); Index: 
lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.document.NumericField; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.Terms; @@ -440,7 +440,7 @@ } private int countTerms(MultiTermQuery q) throws Exception { - final Terms terms = MultiFields.getTerms(reader, q.getField()); + final Terms terms = MultiInvertedFields.getTerms(reader, q.getField()); if (terms == null) return 0; final TermsEnum termEnum = q.getTermsEnum(terms); Index: lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy) @@ -28,11 +28,10 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; @@ -71,7 +70,7 @@ // this TermEnum gives "piccadilly", "pie" and "pizza". String prefix = "pi"; - TermsEnum te = MultiFields.getFields(reader).terms("body").iterator(null); + TermsEnum te = MultiInvertedFields.getFields(reader).terms("body").iterator(null); te.seekCeil(new BytesRef(prefix)); do { String s = te.term().utf8ToString(); Index: lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy) @@ -17,10 +17,10 @@ * limitations under the License. 
*/ +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -56,7 +56,7 @@ assertEquals("One in /Computers/Mac", 1, hits.length); query = new PrefixQuery(new Term("category", "")); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "category"); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), "category"); assertFalse(query.getTermsEnum(terms) instanceof PrefixTermsEnum); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("everything", 3, hits.length); Index: lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.TextField; import org.apache.lucene.index.AtomicReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -98,20 +98,20 @@ IndexSearcher searcher = newSearcher(reader); - DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(), - MultiFields.getLiveDocs(searcher.getIndexReader()), - "field", - new BytesRef("1"), - false); + DocsAndPositionsEnum pos = MultiInvertedFields.getTermPositionsEnum(searcher.getIndexReader(), + MultiInvertedFields.getLiveDocs(searcher.getIndexReader()), + "field", + new BytesRef("1"), + false); pos.nextDoc(); // first token should be at position 0 assertEquals(0, pos.nextPosition()); - pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(), - MultiFields.getLiveDocs(searcher.getIndexReader()), - "field", - new BytesRef("2"), - false); + pos = MultiInvertedFields.getTermPositionsEnum(searcher.getIndexReader(), + MultiInvertedFields.getLiveDocs(searcher.getIndexReader()), + "field", + new BytesRef("2"), + false); pos.nextDoc(); // second token should be at position 2 assertEquals(2, pos.nextPosition()); Index: lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java (working copy) @@ -28,7 +28,7 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Terms; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; @@ -106,7 +106,7 @@ IndexReader reader = IndexReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); TermRangeQuery query = new TermRangeQuery("content", null, null, true, true); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "content"); + Terms terms = 
MultiInvertedFields.getTerms(searcher.getIndexReader(), "content"); assertFalse(query.getTermsEnum(terms) instanceof TermRangeTermsEnum); assertEquals(4, searcher.search(query, null, 1000).scoreDocs.length); query = new TermRangeQuery("content", null, null, false, false); Index: lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.index.CompositeReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; @@ -175,7 +175,7 @@ if (terms == null && docCount > minDocsToMakeTerms) { // TODO: try to "focus" on high freq terms sometimes too // TODO: maybe also periodically reset the terms...? - final TermsEnum termsEnum = MultiFields.getTerms(mockReader, "body").iterator(null); + final TermsEnum termsEnum = MultiInvertedFields.getTerms(mockReader, "body").iterator(null); terms = new ArrayList(); while(termsEnum.next() != null) { terms.add(BytesRef.deepCopyOf(termsEnum.term())); Index: lucene/core/src/test/org/apache/lucene/search/TestWildcard.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestWildcard.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestWildcard.java (working copy) @@ -17,13 +17,13 @@ * limitations under the License. */ +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -131,7 +131,7 @@ MultiTermQuery wq = new WildcardQuery(new Term("field", "prefix*")); assertMatches(searcher, wq, 2); - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "field"); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), "field"); assertTrue(wq.getTermsEnum(terms) instanceof PrefixTermsEnum); wq = new WildcardQuery(new Term("field", "*")); Index: lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.document.NumericField; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.Terms; @@ -465,7 +465,7 @@ } private int countTerms(MultiTermQuery q) throws Exception { - final Terms terms = MultiFields.getTerms(reader, q.getField()); + 
final Terms terms = MultiInvertedFields.getTerms(reader, q.getField()); if (terms == null) return 0; final TermsEnum termEnum = q.getTermsEnum(terms); Index: lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java (working copy) @@ -159,7 +159,7 @@ IndexReader reader = IndexReader.open(dir); try { int numPayloads = 0; - DocsAndPositionsEnum tpe = MultiFields.getTermPositionsEnum(reader, null, field, text, false); + DocsAndPositionsEnum tpe = MultiInvertedFields.getTermPositionsEnum(reader, null, field, text, false); while (tpe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { tpe.nextPosition(); if (tpe.hasPayload()) { Index: lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java (working copy) @@ -280,7 +280,7 @@ // TODO: offsets } else { - Fields vectors = r.getTermVectors(docID); + InvertedFields vectors = r.getTermVectors(docID); assertTrue(vectors == null || vectors.terms(name) == null); } Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy) @@ -504,7 +504,7 @@ DocsEnum tdocs = _TestUtil.docs(random, reader, t.field(), new BytesRef(t.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); @@ -638,7 +638,7 @@ assertEquals(expected, reader.docFreq(new Term("contents", "here"))); assertEquals(expected, reader.maxDoc()); int numDel = 0; - final Bits liveDocs = MultiFields.getLiveDocs(reader); + final Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); assertNotNull(liveDocs); for(int j=0;j allFieldNames = new HashSet(); Collection indexedFieldNames = new HashSet(); @@ -354,7 +354,7 @@ DocsEnum tdocs = _TestUtil.docs(random, reader, term.field(), new BytesRef(term.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); int count = 0; @@ -553,8 +553,8 @@ assertEquals("Single segment test differs.", index1.getSequentialSubReaders().length == 1, index2.getSequentialSubReaders().length == 1); // check field names - FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(index1); - FieldInfos fieldInfos2 = MultiFields.getMergedFieldInfos(index2); + FieldInfos fieldInfos1 = MultiInvertedFields.getMergedFieldInfos(index1); + FieldInfos fieldInfos2 = MultiInvertedFields.getMergedFieldInfos(index2); assertEquals("IndexReaders have different numbers of fields.", fieldInfos1.size(), fieldInfos2.size()); final int numFields = fieldInfos1.size(); for(int fieldID=0;fieldID getDVFields(IndexReader reader) { Set fields = new HashSet(); - for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) { + for(FieldInfo fi : MultiInvertedFields.getMergedFieldInfos(reader)) { if (fi.hasDocValues()) { fields.add(fi.name); } @@ -598,8 +597,8 @@ // TODO: this is kinda stupid, we don't delete documents in the test. 
public void assertDeletedDocs(IndexReader leftReader, IndexReader rightReader) throws Exception { assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs(); - Bits leftBits = MultiFields.getLiveDocs(leftReader); - Bits rightBits = MultiFields.getLiveDocs(rightReader); + Bits leftBits = MultiInvertedFields.getLiveDocs(leftReader); + Bits rightBits = MultiInvertedFields.getLiveDocs(rightReader); if (leftBits == null || rightBits == null) { assertNull(info, leftBits); Index: lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java (working copy) @@ -75,7 +75,7 @@ private void assertSumDocFreq(IndexReader ir) throws Exception { // compute sumDocFreq across all fields - Fields fields = MultiFields.getFields(ir); + InvertedFields fields = MultiInvertedFields.getFields(ir); FieldsEnum fieldEnum = fields.iterator(); String f = null; while ((f = fieldEnum.next()) != null) { Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (working copy) @@ -213,7 +213,7 @@ DocsEnum tdocs = _TestUtil.docs(random, reader, "field", new BytesRef("aaa"), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); int count = 0; @@ -277,7 +277,7 @@ if (success) { IndexReader reader = IndexReader.open(dir); - final Bits delDocs = MultiFields.getLiveDocs(reader); + final Bits delDocs = MultiInvertedFields.getLiveDocs(reader); for(int j=0;j unsortedTerms = new ArrayList(terms); Collections.shuffle(unsortedTerms, random); @@ -139,7 +139,7 @@ /** mixes up seek and next for all terms */ public void testSeekingAndNexting() throws Exception { for (int i = 0; i < numIterations; i++) { - TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null); + TermsEnum te = MultiInvertedFields.getTerms(reader, "field").iterator(null); for (BytesRef term : terms) { int c = random.nextInt(3); @@ -161,7 +161,7 @@ String reg = AutomatonTestUtil.randomRegexp(random); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false); - TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null); + TermsEnum te = MultiInvertedFields.getTerms(reader, "field").intersect(ca, null); Automaton expected = BasicOperations.intersection(termsAutomaton, automaton); TreeSet found = new TreeSet(); while (te.next() != null) { Index: lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java (working copy) @@ -114,7 +114,7 @@ IndexReader reader = IndexReader.open(dir); - TermsEnum tenum = MultiFields.getTerms(reader, "foo").iterator(null); + TermsEnum tenum = MultiInvertedFields.getTerms(reader, "foo").iterator(null); start = System.currentTimeMillis(); @@ -122,7 +122,7 @@ DocsEnum tdocs = null; for (int i=0; i findTerms(IndexReader r) throws IOException { 
System.out.println("TEST: findTerms"); - final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); + final TermsEnum termsEnum = MultiInvertedFields.getTerms(r, "field").iterator(null); final List savedTerms = new ArrayList(); int nextSave = _TestUtil.nextInt(random, 500000, 1000000); BytesRef term; @@ -243,7 +242,7 @@ System.out.println("TEST: run " + terms.size() + " terms on reader=" + r); IndexSearcher s = new IndexSearcher(r); Collections.shuffle(terms); - TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); + TermsEnum termsEnum = MultiInvertedFields.getTerms(r, "field").iterator(null); boolean failed = false; for(int iter=0;iter<10*terms.size();iter++) { final BytesRef term = terms.get(random.nextInt(terms.size())); Index: lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java (working copy) @@ -221,7 +221,7 @@ public static int[] toDocsArray(Term term, Bits bits, IndexReader reader) throws IOException { - Fields fields = MultiFields.getFields(reader); + InvertedFields fields = MultiInvertedFields.getFields(reader); Terms cterms = fields.terms(term.field); TermsEnum ctermsEnum = cterms.iterator(null); if (ctermsEnum.seekExact(new BytesRef(term.text()), false)) { Index: lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java (working copy) @@ -78,7 +78,7 @@ IndexReader r = w.getReader(); w.close(); - DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"), true); + DocsAndPositionsEnum dp = MultiInvertedFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"), true); assertNotNull(dp); assertEquals(0, dp.nextDoc()); assertEquals(2, dp.freq()); @@ -90,7 +90,7 @@ assertEquals(17, dp.endOffset()); assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc()); - dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("b"), true); + dp = MultiInvertedFields.getTermPositionsEnum(r, null, "content", new BytesRef("b"), true); assertNotNull(dp); assertEquals(0, dp.nextDoc()); assertEquals(1, dp.freq()); @@ -99,7 +99,7 @@ assertEquals(9, dp.endOffset()); assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc()); - dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("c"), true); + dp = MultiInvertedFields.getTermPositionsEnum(r, null, "content", new BytesRef("c"), true); assertNotNull(dp); assertEquals(0, dp.nextDoc()); assertEquals(1, dp.freq()); @@ -154,7 +154,7 @@ String terms[] = { "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred" }; for (String term : terms) { - DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term), true); + DocsAndPositionsEnum dp = MultiInvertedFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term), true); int doc; while((doc = dp.nextDoc()) != DocsEnum.NO_MORE_DOCS) { String storedNumbers = reader.document(doc).get("numbers"); @@ -182,7 +182,7 @@ for (int j = 0; j < numSkippingTests; j++) { int num = _TestUtil.nextInt(random, 100, Math.min(numDocs-1, 999)); - 
DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"), true); + DocsAndPositionsEnum dp = MultiInvertedFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"), true); int doc = dp.advance(num); assertEquals(num, doc); int freq = dp.freq(); @@ -207,7 +207,7 @@ // check that other fields (without offsets) work correctly for (int i = 0; i < numDocs; i++) { - DocsEnum dp = MultiFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), false); + DocsEnum dp = MultiInvertedFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), false); assertEquals(i, dp.nextDoc()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc()); } Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (working copy) @@ -103,7 +103,7 @@ throws IOException { IndexReader reader = IndexReader.open(dir); - TermsEnum termEnum = MultiFields.getTerms(reader, "content").iterator(null); + TermsEnum termEnum = MultiInvertedFields.getTerms(reader, "content").iterator(null); // create enumeration of all terms // go to the first term (aaa) Index: lucene/core/src/test/org/apache/lucene/index/TestFlex.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestFlex.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestFlex.java (working copy) @@ -53,7 +53,7 @@ IndexReader r = w.getReader(); - TermsEnum terms = MultiFields.getTerms(r, "field3").iterator(null); + TermsEnum terms = MultiInvertedFields.getTerms(r, "field3").iterator(null); assertEquals(TermsEnum.SeekStatus.END, terms.seekCeil(new BytesRef("abc"))); r.close(); } Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy) @@ -135,7 +135,7 @@ DocsEnum tdocs = _TestUtil.docs(random, reader, ta.field(), new BytesRef(ta.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, true); @@ -160,7 +160,7 @@ tdocs = _TestUtil.docs(random, reader, ta.field(), new BytesRef(ta.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); @@ -178,7 +178,7 @@ tdocs = _TestUtil.docs(random, reader, tb.field(), new BytesRef(tb.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, true); @@ -202,7 +202,7 @@ tdocs = _TestUtil.docs(random, reader, tb.field(), new BytesRef(tb.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, true); @@ -222,7 +222,7 @@ tdocs = _TestUtil.docs(random, reader, tc.field(), new BytesRef(tc.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, true); @@ -248,7 +248,7 @@ tdocs = _TestUtil.docs(random, reader, tc.field(), new BytesRef(tc.text()), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS); Index: lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java 
=================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java (working copy) @@ -30,7 +30,6 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.search.*; import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.store.Directory; @@ -438,8 +437,8 @@ iw.addDocument(doc); IndexReader ir = iw.getReader(); iw.close(); - Terms terms = MultiFields.getTerms(ir, "foo"); - assertEquals(-1, MultiFields.totalTermFreq(ir, "foo", new BytesRef("bar"))); + Terms terms = MultiInvertedFields.getTerms(ir, "foo"); + assertEquals(-1, MultiInvertedFields.totalTermFreq(ir, "foo", new BytesRef("bar"))); assertEquals(-1, terms.getSumTotalTermFreq()); ir.close(); dir.close(); Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy) @@ -52,7 +52,7 @@ int count = 0; DocsEnum td = _TestUtil.docs(random, r, t.field(), new BytesRef(t.text()), - MultiFields.getLiveDocs(r), + MultiInvertedFields.getLiveDocs(r), null, false); Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy) @@ -19,10 +19,8 @@ import java.io.IOException; -import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; -import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; @@ -101,7 +99,7 @@ DocsEnum termDocs = _TestUtil.docs(random, mergedReader, DocHelper.TEXT_FIELD_2_KEY, new BytesRef("field"), - MultiFields.getLiveDocs(mergedReader), + MultiInvertedFields.getLiveDocs(mergedReader), null, false); assertTrue(termDocs != null); Index: lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java (working copy) @@ -62,7 +62,7 @@ w.close(); final List terms = new ArrayList(); - final TermsEnum termsEnum = MultiFields.getTerms(r, "body").iterator(null); + final TermsEnum termsEnum = MultiInvertedFields.getTerms(r, "body").iterator(null); BytesRef term; while((term = termsEnum.next()) != null) { terms.add(BytesRef.deepCopyOf(term)); @@ -304,7 +304,7 @@ } } - final TermsEnum te = MultiFields.getTerms(r, "f").intersect(c, startTerm); + final TermsEnum te = MultiInvertedFields.getTerms(r, "f").intersect(c, startTerm); int loc; if (startTerm == null) { @@ -484,7 +484,7 @@ assertEquals(1, docFreq(r, "xx")); assertEquals(1, docFreq(r, "aa4")); - final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator(null); + final TermsEnum te = MultiInvertedFields.getTerms(r, FIELD).iterator(null); while(te.next() != null) { 
//System.out.println("TEST: next term=" + te.term().utf8ToString()); } @@ -514,7 +514,7 @@ w.close(); assertEquals(1, r.numDocs()); assertEquals(1, r.maxDoc()); - Terms terms = MultiFields.getTerms(r, "field"); + Terms terms = MultiInvertedFields.getTerms(r, "field"); if (terms != null) { assertNull(terms.iterator(null).next()); } @@ -616,7 +616,7 @@ System.out.println(" " + t.utf8ToString() + " " + t); } } - final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator(null); + final TermsEnum te = MultiInvertedFields.getTerms(r, FIELD).iterator(null); final int END_LOC = -validTerms.length-1; Index: lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (working copy) @@ -153,11 +153,11 @@ writer.close(); IndexReader reader = IndexReader.open(directory); - DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader, - MultiFields.getLiveDocs(reader), - this.field, - new BytesRef("b"), - false); + DocsAndPositionsEnum tp = MultiInvertedFields.getTermPositionsEnum(reader, + MultiInvertedFields.getLiveDocs(reader), + this.field, + new BytesRef("b"), + false); for (int i = 0; i < 10; i++) { tp.nextDoc(); @@ -165,11 +165,11 @@ assertEquals(tp.nextPosition(), 1); } - tp = MultiFields.getTermPositionsEnum(reader, - MultiFields.getLiveDocs(reader), - this.field, - new BytesRef("a"), - false); + tp = MultiInvertedFields.getTermPositionsEnum(reader, + MultiInvertedFields.getLiveDocs(reader), + this.field, + new BytesRef("a"), + false); for (int i = 0; i < 10; i++) { tp.nextDoc(); Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (working copy) @@ -136,7 +136,7 @@ } private void checkTermsOrder(IndexReader r, Set allTerms, boolean isTop) throws IOException { - TermsEnum terms = MultiFields.getFields(r).terms("f").iterator(null); + TermsEnum terms = MultiInvertedFields.getFields(r).terms("f").iterator(null); BytesRef last = new BytesRef(); Index: lucene/core/src/test/org/apache/lucene/index/TestDocCount.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestDocCount.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestDocCount.java (working copy) @@ -60,7 +60,7 @@ } private void verifyCount(IndexReader ir) throws Exception { - Fields fields = MultiFields.getFields(ir); + InvertedFields fields = MultiInvertedFields.getFields(ir); if (fields == null) { return; } Index: lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -312,24 +312,24 @@ int[] r2r1 = new int[r2.maxDoc()]; // r2 id to r1 id mapping // create mapping from id2 space to id2 based on idField - final Fields f1 = MultiFields.getFields(r1); + final InvertedFields f1 = MultiInvertedFields.getFields(r1); if (f1 == null) { // make sure r2 is empty - 
assertNull(MultiFields.getFields(r2)); + assertNull(MultiInvertedFields.getFields(r2)); return; } final Terms terms1 = f1.terms(idField); if (terms1 == null) { - assertTrue(MultiFields.getFields(r2) == null || - MultiFields.getFields(r2).terms(idField) == null); + assertTrue(MultiInvertedFields.getFields(r2) == null || + MultiInvertedFields.getFields(r2).terms(idField) == null); return; } final TermsEnum termsEnum = terms1.iterator(null); - final Bits liveDocs1 = MultiFields.getLiveDocs(r1); - final Bits liveDocs2 = MultiFields.getLiveDocs(r2); + final Bits liveDocs1 = MultiInvertedFields.getLiveDocs(r1); + final Bits liveDocs2 = MultiInvertedFields.getLiveDocs(r2); - Fields fields = MultiFields.getFields(r2); + InvertedFields fields = MultiInvertedFields.getFields(r2); if (fields == null) { // make sure r1 is in fact empty (eg has only all // deleted docs): @@ -392,7 +392,7 @@ verifyEquals(r1.getTermVectors(id1), r2.getTermVectors(id2)); } catch (Throwable e) { System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2); - Fields tv1 = r1.getTermVectors(id1); + InvertedFields tv1 = r1.getTermVectors(id1); System.out.println(" d1=" + tv1); if (tv1 != null) { FieldsEnum fieldsEnum = tv1.iterator(); @@ -426,7 +426,7 @@ } } - Fields tv2 = r2.getTermVectors(id2); + InvertedFields tv2 = r2.getTermVectors(id2); System.out.println(" d2=" + tv2); if (tv2 != null) { FieldsEnum fieldsEnum = tv2.iterator(); @@ -468,8 +468,8 @@ // Verify postings //System.out.println("TEST: create te1"); - final FieldsEnum fields1 = MultiFields.getFields(r1).iterator(); - final FieldsEnum fields2 = MultiFields.getFields(r2).iterator(); + final FieldsEnum fields1 = MultiInvertedFields.getFields(r1).iterator(); + final FieldsEnum fields2 = MultiInvertedFields.getFields(r2).iterator(); String field1=null, field2=null; TermsEnum termsEnum1 = null; @@ -597,7 +597,7 @@ } } - public static void verifyEquals(Fields d1, Fields d2) throws IOException { + public static void verifyEquals(InvertedFields d1, InvertedFields d2) throws IOException { if (d1 == null) { assertTrue(d2 == null || d2.getUniqueFieldCount() == 0); return; Index: lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java (working copy) @@ -246,7 +246,7 @@ } } - // Fields 1-4 indexed together: + // InvertedFields 1-4 indexed together: private IndexSearcher single(Random random) throws IOException { dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); @@ -280,7 +280,7 @@ return newSearcher(ir); } - // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader: + // InvertedFields 1 & 2 in one index, 3 & 4 in other, with ParallelReader: private IndexSearcher parallel(Random random) throws IOException { dir1 = getDir1(random); dir2 = getDir2(random); Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -542,7 +542,7 @@ DocsEnum td = _TestUtil.docs(random, reader, "field", new BytesRef("a"), - MultiFields.getLiveDocs(reader), + 
MultiInvertedFields.getLiveDocs(reader), null, true); td.nextDoc(); @@ -897,11 +897,11 @@ hits = s.search(q, null, 1000).scoreDocs; assertEquals(1, hits.length); - DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(), - MultiFields.getLiveDocs(s.getIndexReader()), - "field", - new BytesRef("a"), - false); + DocsAndPositionsEnum tps = MultiInvertedFields.getTermPositionsEnum(s.getIndexReader(), + MultiInvertedFields.getLiveDocs(s.getIndexReader()), + "field", + new BytesRef("a"), + false); assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals(1, tps.freq()); @@ -1637,7 +1637,7 @@ // Make sure position is still incremented when // massive term is skipped: - DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader, null, "content", new BytesRef("another"), false); + DocsAndPositionsEnum tps = MultiInvertedFields.getTermPositionsEnum(reader, null, "content", new BytesRef("another"), false); assertEquals(0, tps.nextDoc()); assertEquals(1, tps.freq()); assertEquals(3, tps.nextPosition()); Index: lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java (working copy) @@ -109,7 +109,7 @@ System.out.println("TEST: reader=" + reader); } - Bits liveDocs = MultiFields.getLiveDocs(reader); + Bits liveDocs = MultiInvertedFields.getLiveDocs(reader); for(int delDoc : deleted) { assertFalse(liveDocs.get(delDoc)); } @@ -141,7 +141,7 @@ DocsEnum docs = _TestUtil.docs(random, r, "field", new BytesRef(term), - MultiFields.getLiveDocs(r), + MultiInvertedFields.getLiveDocs(r), null, false); for(int docID : expected) { Index: lucene/core/src/test/org/apache/lucene/index/TestCodecs.java =================================================================== --- lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (working copy) @@ -24,10 +24,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedInvertedFieldsProducer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.codecs.PostingsConsumer; import org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; import org.apache.lucene.codecs.lucene3x.Lucene3xCodec; import org.apache.lucene.codecs.mocksep.MockSepPostingsFormat; import org.apache.lucene.document.Document; @@ -108,7 +108,7 @@ public void write(final FieldsConsumer consumer) throws Throwable { Arrays.sort(terms); - final TermsConsumer termsConsumer = consumer.addField(fieldInfo); + final InvertedTermsConsumer termsConsumer = consumer.addField(fieldInfo); long sumTotalTermCount = 0; long sumDF = 0; OpenBitSet visitedDocs = new OpenBitSet(); @@ -151,7 +151,7 @@ return text.compareTo(((TermData) o).text); } - public long write(final TermsConsumer termsConsumer) throws Throwable { + public long write(final InvertedTermsConsumer termsConsumer) throws Throwable { final PostingsConsumer postingsConsumer = termsConsumer.startTerm(text); long totTF = 0; for(int i=0;i 0); } - Fields results = reader.getTermVectors(0); + InvertedFields results = reader.getTermVectors(0); assertTrue(results != null); assertEquals("We 
do not have 3 term freq vectors", 3, results.getUniqueFieldCount()); } Index: lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java =================================================================== --- lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (working copy) @@ -37,7 +37,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -1122,7 +1122,7 @@ System.out.println("FST stores docFreq"); } } - Terms terms = MultiFields.getTerms(r, "body"); + Terms terms = MultiInvertedFields.getTerms(r, "body"); if (terms != null) { final IntsRef scratchIntsRef = new IntsRef(); final TermsEnum termsEnum = terms.iterator(null); @@ -1671,7 +1671,7 @@ } // Verify w/ MultiTermsEnum - final TermsEnum termsEnum = MultiFields.getTerms(r, "id").iterator(null); + final TermsEnum termsEnum = MultiInvertedFields.getTerms(r, "id").iterator(null); for(int iter=0;iter<2*NUM_IDS;iter++) { final String id; final String nextID; Index: lucene/core/src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- lucene/core/src/test/org/apache/lucene/document/TestDocument.java (revision 1245069) +++ lucene/core/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -20,9 +20,8 @@ import java.io.StringReader; import org.apache.lucene.analysis.EmptyTokenizer; -import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.index.DocsAndPositionsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; @@ -338,7 +337,7 @@ assertEquals(1, s.search(new TermQuery(new Term("tokenized_tokenstream", "xyz")), 1).totalHits); for(String field : new String[] {"tv", "tv_pos", "tv_off", "tv_pos_off"}) { - Fields tvFields = r.getTermVectors(0); + InvertedFields tvFields = r.getTermVectors(0); Terms tvs = tvFields.terms(field); assertNotNull(tvs); assertEquals(2, tvs.getUniqueTermCount()); Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java (working copy) @@ -30,7 +30,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; @@ -225,12 +225,12 @@ return size; } - private class TVFields extends Fields { + private class TVInvertedFields extends InvertedFields { private final int[] fieldNumbers; private final long[] fieldFPs; private final Map fieldNumberToIndex = new HashMap(); - public TVFields(int docID) throws IOException { + public TVInvertedFields(int docID) 
throws IOException { seekTvx(docID); tvd.seek(tvx.readLong()); @@ -277,7 +277,7 @@ @Override public Terms terms() throws IOException { - return TVFields.this.terms(fieldInfos.fieldName(fieldNumbers[fieldUpto-1])); + return TVInvertedFields.this.terms(fieldInfos.fieldName(fieldNumbers[fieldUpto-1])); } }; } @@ -663,12 +663,12 @@ } @Override - public Fields get(int docID) throws IOException { + public InvertedFields get(int docID) throws IOException { if (docID < 0 || docID >= numTotalDocs) { throw new IllegalArgumentException("doID=" + docID + " is out of bounds [0.." + (numTotalDocs-1) + "]"); } if (tvx != null) { - Fields fields = new TVFields(docID); + InvertedFields fields = new TVInvertedFields(docID); if (fields.getUniqueFieldCount() == 0) { // TODO: we can improve writer here, eg write 0 into // tvx file, so we know on first read from tvx that Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.codecs.TermVectorsWriter; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.MergeState; @@ -309,7 +309,7 @@ // NOTE: it's very important to first assign to vectors then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Fields vectors = reader.reader.getTermVectors(docNum); + InvertedFields vectors = reader.reader.getTermVectors(docNum); addAllDocVectors(vectors, mergeState.fieldInfos); totalNumDocs++; mergeState.checkAbort.work(300); @@ -339,7 +339,7 @@ for (int docNum = 0; docNum < maxDoc; docNum++) { // NOTE: it's very important to first assign to vectors then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Fields vectors = reader.reader.getTermVectors(docNum); + InvertedFields vectors = reader.reader.getTermVectors(docNum); addAllDocVectors(vectors, mergeState.fieldInfos); mergeState.checkAbort.work(300); } Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsReader.java (working copy) @@ -52,9 +52,9 @@ private int size; private boolean closed; - /** Returns a cloned FieldsReader that shares open + /** Returns a cloned InvertedInvertedFieldsReader that shares open * IndexInputs with the original one. It is the caller's - * job not to close the original FieldsReader until all + * job not to close the original InvertedInvertedFieldsReader until all * clones are called (eg, currently SegmentReader manages * this logic). 
*/ @Override @@ -108,17 +108,17 @@ } /** - * @throws AlreadyClosedException if this FieldsReader is closed + * @throws AlreadyClosedException if this InvertedInvertedFieldsReader is closed */ private void ensureOpen() throws AlreadyClosedException { if (closed) { - throw new AlreadyClosedException("this FieldsReader is closed"); + throw new AlreadyClosedException("this InvertedInvertedFieldsReader is closed"); } } /** * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams. - * This means that the Fields values will not be accessible. + * This means that the InvertedFields values will not be accessible. * * @throws IOException */ Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40PostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTreeTermsReader; import org.apache.lucene.codecs.BlockTreeTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedInvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsReaderBase; import org.apache.lucene.codecs.PostingsWriterBase; @@ -76,12 +76,12 @@ public final static int TERMS_CACHE_SIZE = 1024; @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedInvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postings = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix); boolean success = false; try { - FieldsProducer ret = new BlockTreeTermsReader( + InvertedInvertedFieldsProducer ret = new BlockTreeTermsReader( state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java (working copy) @@ -21,7 +21,7 @@ import java.io.IOException; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.Terms; @@ -36,12 +36,12 @@ public abstract class FieldsConsumer implements Closeable { /** Add a new field */ - public abstract TermsConsumer addField(FieldInfo field) throws IOException; + public abstract InvertedTermsConsumer addField(FieldInfo field) throws IOException; /** Called when we are done adding everything. 
*/ public abstract void close() throws IOException; - public void merge(MergeState mergeState, Fields fields) throws IOException { + public void merge(MergeState mergeState, InvertedFields fields) throws IOException { FieldsEnum fieldsEnum = fields.iterator(); assert fieldsEnum != null; String field; @@ -50,7 +50,7 @@ assert mergeState.fieldInfo != null : "FieldInfo for field is null: "+ field; Terms terms = fieldsEnum.terms(); if (terms != null) { - final TermsConsumer termsConsumer = addField(mergeState.fieldInfo); + final InvertedTermsConsumer termsConsumer = addField(mergeState.fieldInfo); termsConsumer.merge(mergeState, terms.iterator(null)); } } Index: lucene/core/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java (working copy) @@ -25,11 +25,11 @@ import java.util.TreeMap; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedInvertedFieldsProducer; import org.apache.lucene.codecs.PostingsConsumer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -100,7 +100,7 @@ private static final boolean VERBOSE = false; - private final static class TermsWriter extends TermsConsumer { + private final static class InvertedTermsWriter extends InvertedTermsConsumer { private final IndexOutput out; private final FieldInfo field; private final Builder builder; @@ -108,7 +108,7 @@ private final boolean doPackFST; private int termCount; - public TermsWriter(IndexOutput out, FieldInfo field, boolean doPackFST) { + public InvertedTermsWriter(IndexOutput out, FieldInfo field, boolean doPackFST) { this.out = out; this.field = field; this.doPackFST = doPackFST; @@ -269,12 +269,12 @@ return new FieldsConsumer() { @Override - public TermsConsumer addField(FieldInfo field) { + public InvertedTermsConsumer addField(FieldInfo field) { if (field.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) { throw new UnsupportedOperationException("this codec cannot index offsets"); } if (VERBOSE) System.out.println("\naddField field=" + field.name); - return new TermsWriter(out, field, doPackFST); + return new InvertedTermsWriter(out, field, doPackFST); } @Override @@ -781,7 +781,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedInvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { final String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION); final IndexInput in = state.dir.openInput(fileName, IOContext.READONCE); @@ -803,7 +803,7 @@ in.close(); } - return new FieldsProducer() { + return new InvertedInvertedFieldsProducer() { @Override public FieldsEnum iterator() { final Iterator iter = fields.values().iterator(); Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xFields.java =================================================================== --- 
lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xFields.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xFields.java (working copy) @@ -25,7 +25,7 @@ import java.util.Map; import java.util.TreeMap; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -49,7 +49,7 @@ * @deprecated (4.0) */ @Deprecated -class Lucene3xFields extends FieldsProducer { +class Lucene3xFields extends InvertedFieldsProducer { private static final boolean DEBUG_SURROGATES = false; Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xTermVectorsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xTermVectorsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xTermVectorsReader.java (working copy) @@ -30,7 +30,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; @@ -195,7 +195,7 @@ return size; } - private class TVFields extends Fields { + private class TVFields extends InvertedFields { private final int[] fieldNumbers; private final long[] fieldFPs; private final Map fieldNumberToIndex = new HashMap(); @@ -654,12 +654,12 @@ } @Override - public Fields get(int docID) throws IOException { + public InvertedFields get(int docID) throws IOException { if (docID < 0 || docID >= numTotalDocs) { throw new IllegalArgumentException("doID=" + docID + " is out of bounds [0.." + (numTotalDocs-1) + "]"); } if (tvx != null) { - Fields fields = new TVFields(docID); + InvertedFields fields = new TVFields(docID); if (fields.getUniqueFieldCount() == 0) { // TODO: we can improve writer here, eg write 0 into // tvx file, so we know on first read from tvx that Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xStoredFieldsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xStoredFieldsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xStoredFieldsReader.java (working copy) @@ -98,9 +98,9 @@ // (lucene 3.0 indexes only), we privately open our own fd. private final CompoundFileDirectory storeCFSReader; - /** Returns a cloned FieldsReader that shares open + /** Returns a cloned InvertedFieldsReader that shares open * IndexInputs with the original one. It is the caller's - * job not to close the original FieldsReader until all + * job not to close the original InvertedFieldsReader until all * clones are called (eg, currently SegmentReader manages * this logic). 
*/ @Override @@ -195,17 +195,17 @@ } /** - * @throws AlreadyClosedException if this FieldsReader is closed + * @throws AlreadyClosedException if this InvertedFieldsReader is closed */ private void ensureOpen() throws AlreadyClosedException { if (closed) { - throw new AlreadyClosedException("this FieldsReader is closed"); + throw new AlreadyClosedException("this InvertedFieldsReader is closed"); } } /** * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams. - * This means that the Fields values will not be accessible. + * This means that the InvertedFields values will not be accessible. * * @throws IOException */ Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xPostingsFormat.java (working copy) @@ -21,7 +21,7 @@ import java.io.IOException; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentWriteState; @@ -60,7 +60,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { return new Lucene3xFields(state.dir, state.fieldInfos, state.segmentInfo, state.context, state.termsIndexDivisor); } Index: lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xSegmentInfosReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xSegmentInfosReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene3x/Lucene3xSegmentInfosReader.java (working copy) @@ -72,7 +72,7 @@ // Above call succeeded, so it's a 3.0 segment. Upgrade it so the next // time the segment is read, its version won't be null and we won't - // need to open FieldsReader every time for each such segment. + // need to open InvertedFieldsReader every time for each such segment. si.setVersion("3.0"); } else if (si.getVersion().equals("2.x")) { // If it's a 3x index touched by 3.1+ code, then segments record their Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTermsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/BlockTermsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTermsReader.java (working copy) @@ -56,7 +56,7 @@ * implementation of the terms dict index. 
* @lucene.experimental */ -public class BlockTermsReader extends FieldsProducer { +public class BlockTermsReader extends InvertedFieldsProducer { // Open input to the main terms dict file (_X.tis) private final IndexInput in; Index: lucene/core/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTreeTermsReader; import org.apache.lucene.codecs.BlockTreeTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsBaseFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsReaderBase; @@ -87,14 +87,14 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase docsReader = wrappedPostingsBaseFormat.postingsReaderBase(state); PostingsReaderBase pulsingReader = new PulsingPostingsReader(docsReader); boolean success = false; try { - FieldsProducer ret = new BlockTreeTermsReader( + InvertedFieldsProducer ret = new BlockTreeTermsReader( state.dir, state.fieldInfos, state.segmentInfo.name, pulsingReader, state.context, Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTermsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/BlockTermsWriter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTermsWriter.java (working copy) @@ -64,7 +64,7 @@ final FieldInfos fieldInfos; FieldInfo currentField; private final TermsIndexWriterBase termsIndexWriter; - private final List fields = new ArrayList(); + private final List fields = new ArrayList(); // private final String segment; @@ -100,12 +100,12 @@ } @Override - public TermsConsumer addField(FieldInfo field) throws IOException { + public InvertedTermsConsumer addField(FieldInfo field) throws IOException { //System.out.println("\nBTW.addField seg=" + segment + " field=" + field.name); assert currentField == null || currentField.name.compareTo(field.name) < 0; currentField = field; TermsIndexWriterBase.FieldWriter fieldIndexWriter = termsIndexWriter.addField(field, out.getFilePointer()); - final TermsWriter terms = new TermsWriter(fieldIndexWriter, field, postingsWriter); + final InvertedTermsWriter terms = new InvertedTermsWriter(fieldIndexWriter, field, postingsWriter); fields.add(terms); return terms; } @@ -116,7 +116,7 @@ try { int nonZeroCount = 0; - for(TermsWriter field : fields) { + for(InvertedTermsWriter field : fields) { if (field.numTerms > 0) { nonZeroCount++; } @@ -125,7 +125,7 @@ final long dirStart = out.getFilePointer(); out.writeVInt(nonZeroCount); - for(TermsWriter field : fields) { + for(InvertedTermsWriter field : fields) { if (field.numTerms > 0) { out.writeVInt(field.fieldInfo.number); out.writeVLong(field.numTerms); @@ -153,7 +153,7 @@ public TermStats stats; } - class TermsWriter extends TermsConsumer { + class InvertedTermsWriter extends InvertedTermsConsumer { private final FieldInfo fieldInfo; private final 
PostingsWriterBase postingsWriter; private final long termsStartPointer; @@ -167,7 +167,7 @@ private int pendingCount; - TermsWriter( + InvertedTermsWriter( TermsIndexWriterBase.FieldWriter fieldIndexWriter, FieldInfo fieldInfo, PostingsWriterBase postingsWriter) Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java (working copy) @@ -17,11 +17,11 @@ * limitations under the License. */ +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.PostingsConsumer; import org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.SegmentWriteState; @@ -63,17 +63,17 @@ } @Override - public TermsConsumer addField(FieldInfo field) throws IOException { + public InvertedTermsConsumer addField(FieldInfo field) throws IOException { write(FIELD); write(field.name); newline(); - return new SimpleTextTermsWriter(field); + return new SimpleTextInvertedTermsWriter(field); } - private class SimpleTextTermsWriter extends TermsConsumer { + private class SimpleTextInvertedTermsWriter extends InvertedTermsConsumer { private final SimpleTextPostingsWriter postingsWriter; - public SimpleTextTermsWriter(FieldInfo field) { + public SimpleTextInvertedTermsWriter(FieldInfo field) { postingsWriter = new SimpleTextPostingsWriter(field); } Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfo; @@ -94,7 +94,7 @@ } @Override - public Fields get(int doc) throws IOException { + public InvertedFields get(int doc) throws IOException { // TestTV tests for this in testBadParams... but is this // really guaranteed by the API? 
if (doc < 0 || doc >= offsets.size()) { @@ -180,7 +180,7 @@ } } } - return new SimpleTVFields(fields); + return new SimpleTVInvertedFields(fields); } @Override @@ -221,10 +221,10 @@ return scratchUTF16.toString(); } - private class SimpleTVFields extends Fields { + private class SimpleTVInvertedFields extends InvertedFields { private final SortedMap fields; - SimpleTVFields(SortedMap fields) throws IOException { + SimpleTVInvertedFields(SortedMap fields) throws IOException { this.fields = fields; } Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsReader.java (working copy) @@ -163,7 +163,7 @@ @Override public StoredFieldsReader clone() { if (in == null) { - throw new AlreadyClosedException("this FieldsReader is closed"); + throw new AlreadyClosedException("this InvertedFieldsReader is closed"); } return new SimpleTextStoredFieldsReader(offsets, (IndexInput) in.clone(), fieldInfos); } Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextPostingsFormat.java (working copy) @@ -21,7 +21,7 @@ import java.util.Set; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentWriteState; @@ -48,8 +48,8 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - return new SimpleTextFieldsReader(state); + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new SimpleTextInvertedFieldsReader(state); } /** Extension of freq postings file */ Index: lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java (working copy) @@ -54,7 +54,7 @@ /** Reads a segment. NOTE: by the time this call * returns, it must hold open any files it will need to * use; else, those files may be deleted. 
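The fieldsProducer contract described in that javadoc does not change with the rename; only the returned type does. For orientation, a minimal caller-side sketch that is not part of the patch: postingsFormat, readState and the "body" field are placeholders, and it assumes InvertedFieldsProducer keeps FieldsProducer's current behavior of being a Closeable InvertedFields.

    // Sketch only; not in the patch.
    InvertedFieldsProducer producer = postingsFormat.fieldsProducer(readState);
    try {
      Terms terms = producer.terms("body");           // InvertedFields.terms(String)
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);   // no enum reuse
        BytesRef term;
        while ((term = termsEnum.next()) != null) {
          // consume the postings for each term here
        }
      }
    } finally {
      producer.close();  // releases the files the producer held open for this segment
    }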
*/ - public abstract FieldsProducer fieldsProducer(SegmentReadState state) throws IOException; + public abstract InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException; /** * Gathers files associated with this segment Index: lucene/core/src/java/org/apache/lucene/codecs/appending/AppendingPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/appending/AppendingPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/appending/AppendingPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTreeTermsReader; import org.apache.lucene.codecs.BlockTreeTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsReaderBase; import org.apache.lucene.codecs.PostingsWriterBase; @@ -32,7 +32,6 @@ import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentWriteState; -import org.apache.lucene.store.Directory; /** * Appending postings impl @@ -60,12 +59,12 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postings = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix); boolean success = false; try { - FieldsProducer ret = new AppendingTermsReader( + InvertedFieldsProducer ret = new AppendingTermsReader( state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; /** * Codec API for reading term vectors: @@ -35,7 +35,7 @@ * term vectors were not indexed. If offsets are * available they are in an {@link OffsetAttribute} * available from the {@link DocsAndPositionsEnum}. */ - public abstract Fields get(int doc) throws IOException; + public abstract InvertedFields get(int doc) throws IOException; /** Create a clone that one caller at a time may use to * read term vectors. 
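Since TermVectorsReader.get(doc) now hands back an InvertedFields that behaves like a single-document inverted index, consumer code reads vectors the same way it reads postings. A minimal sketch, not from the patch; reader, docID and the "body" field are placeholders.

    InvertedFields vectors = reader.getTermVectors(docID);  // null if no vectors were indexed
    if (vectors != null) {
      Terms vector = vectors.terms("body");
      if (vector != null) {
        TermsEnum termsEnum = vector.iterator(null);
        BytesRef term;
        while ((term = termsEnum.next()) != null) {
          System.out.println(term.utf8ToString());          // each term of this document's "body"
        }
      }
    }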
*/ Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java (working copy) @@ -35,7 +35,6 @@ import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.TermState; import org.apache.lucene.index.Terms; -import org.apache.lucene.index.TermsEnum.SeekStatus; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.Directory; @@ -86,7 +85,7 @@ * @lucene.experimental */ -public class BlockTreeTermsReader extends FieldsProducer { +public class BlockTreeTermsReader extends InvertedFieldsProducer { // Open input to the main terms dict file (_X.tib) private final IndexInput in; Index: lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java (working copy) @@ -25,7 +25,7 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.Terms; @@ -153,7 +153,7 @@ } // NOTE: it's very important to first assign to vectors then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Fields vectors = reader.reader.getTermVectors(docID); + InvertedFields vectors = reader.reader.getTermVectors(docID); addAllDocVectors(vectors, mergeState.fieldInfos); docCount++; mergeState.checkAbort.work(300); @@ -166,9 +166,9 @@ /** Safe (but, slowish) default method to write every * vector field in the document. This default * implementation requires that the vectors implement - * both Fields.getUniqueFieldCount and + * both InvertedFields.getUniqueFieldCount and * Terms.getUniqueTermCount. 
*/ - protected final void addAllDocVectors(Fields vectors, FieldInfos fieldInfos) throws IOException { + protected final void addAllDocVectors(InvertedFields vectors, FieldInfos fieldInfos) throws IOException { if (vectors == null) { startDocument(0); return; Index: lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java (working copy) @@ -28,9 +28,9 @@ import java.util.TreeMap; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.TermsConsumer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.IndexFileNames; @@ -99,7 +99,7 @@ } @Override - public TermsConsumer addField(FieldInfo field) throws IOException { + public InvertedTermsConsumer addField(FieldInfo field) throws IOException { final PostingsFormat format = getPostingsFormatForField(field.name); if (format == null) { throw new IllegalStateException("invalid null PostingsFormat for field=\"" + field.name + "\""); @@ -173,12 +173,12 @@ } } - private class FieldsReader extends FieldsProducer { + private class InvertedFieldsReader extends InvertedFieldsProducer { - private final Map fields = new TreeMap(); - private final Map formats = new IdentityHashMap(); + private final Map fields = new TreeMap(); + private final Map formats = new IdentityHashMap(); - public FieldsReader(final SegmentReadState readState) throws IOException { + public InvertedFieldsReader(final SegmentReadState readState) throws IOException { // Read _X.per and init each format: boolean success = false; @@ -235,7 +235,7 @@ @Override public Terms terms(String field) throws IOException { - FieldsProducer fieldsProducer = fields.get(field); + InvertedFieldsProducer fieldsProducer = fields.get(field); return fieldsProducer == null ? null : fieldsProducer.terms(field); } @@ -251,9 +251,9 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { - return new FieldsReader(state); + return new InvertedFieldsReader(state); } private abstract class VisitPerFieldFile { Index: lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java (working copy) @@ -118,7 +118,7 @@ final PostingsWriterBase postingsWriter; final FieldInfos fieldInfos; FieldInfo currentField; - private final List fields = new ArrayList(); + private final List fields = new ArrayList(); // private final String segment; /** Create a new writer. 
The number of items (terms or @@ -198,12 +198,12 @@ } @Override - public TermsConsumer addField(FieldInfo field) throws IOException { + public InvertedTermsConsumer addField(FieldInfo field) throws IOException { //DEBUG = field.name.equals("id"); //if (DEBUG) System.out.println("\nBTTW.addField seg=" + segment + " field=" + field.name); assert currentField == null || currentField.name.compareTo(field.name) < 0; currentField = field; - final TermsWriter terms = new TermsWriter(field); + final InvertedTermsWriter terms = new InvertedTermsWriter(field); fields.add(terms); return terms; } @@ -346,7 +346,7 @@ final RAMOutputStream scratchBytes = new RAMOutputStream(); - class TermsWriter extends TermsConsumer { + class InvertedTermsWriter extends InvertedTermsConsumer { private final FieldInfo fieldInfo; private long numTerms; long sumTotalTermFreq; @@ -819,7 +819,7 @@ return new PendingBlock(prefix, startFP, termCount != 0, isFloor, floorLeadByte, subIndices); } - TermsWriter(FieldInfo fieldInfo) { + InvertedTermsWriter(FieldInfo fieldInfo) { this.fieldInfo = fieldInfo; noOutputs = NoOutputs.getSingleton(); @@ -911,7 +911,7 @@ try { int nonZeroCount = 0; - for(TermsWriter field : fields) { + for(InvertedTermsWriter field : fields) { if (field.numTerms > 0) { nonZeroCount++; } @@ -922,7 +922,7 @@ out.writeVInt(nonZeroCount); - for(TermsWriter field : fields) { + for(InvertedTermsWriter field : fields) { if (field.numTerms > 0) { //System.out.println(" field " + field.fieldInfo.name + " " + field.numTerms + " terms"); out.writeVInt(field.fieldInfo.number); Index: lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.AtomicReader; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -84,7 +84,7 @@ @Override public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException { final AtomicReader reader = context.reader(); - final Fields fields = reader.fields(); + final InvertedFields fields = reader.fields(); if (fields == null) { // reader has no fields return DocIdSet.EMPTY_DOCIDSET; Index: lucene/core/src/java/org/apache/lucene/search/Sort.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/Sort.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/Sort.java (working copy) @@ -155,7 +155,7 @@ * changes during their rewriting. 
* * @param searcher IndexSearcher to use in the rewriting - * @return {@code this} if the Sort/Fields have not changed, or a new Sort if there + * @return {@code this} if the Sort/InvertedFields have not changed, or a new Sort if there * is a change * @throws IOException Can be thrown by the rewriting * @lucene.experimental Index: lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.index.AtomicReaderContext; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.TermState; @@ -93,7 +93,7 @@ if (termContext == null) { // this happens with span-not query, as it doesn't include the NOT side in extractTerms() // so we seek to the term now in this segment..., this sucks because its ugly mostly! - final Fields fields = context.reader().fields(); + final InvertedFields fields = context.reader().fields(); if (fields != null) { final Terms terms = fields.terms(term.field()); if (terms != null) { Index: lucene/core/src/java/org/apache/lucene/search/SortField.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/SortField.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/SortField.java (working copy) @@ -29,7 +29,7 @@ /** * Stores information about how to sort documents by terms in an individual - * field. Fields must be indexed in order to sort by them. + * field. InvertedFields must be indexed in order to sort by them. * *

    Created: Feb 11, 2004 1:25:29 PM * Index: lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java (working copy) @@ -21,7 +21,7 @@ import java.util.Comparator; import org.apache.lucene.index.AtomicReaderContext; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.Term; @@ -50,7 +50,7 @@ Comparator lastTermComp = null; final AtomicReaderContext[] leaves = topReaderContext.leaves(); for (AtomicReaderContext context : leaves) { - final Fields fields = context.reader().fields(); + final InvertedFields fields = context.reader().fields(); if (fields == null) { // reader has no fields continue; Index: lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -35,7 +35,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; // javadocs import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Term; @@ -813,7 +813,7 @@ final long sumTotalTermFreq; final long sumDocFreq; - Terms terms = MultiFields.getTerms(reader, field); + Terms terms = MultiInvertedFields.getTerms(reader, field); if (terms == null) { docCount = 0; sumTotalTermFreq = 0; Index: lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java (working copy) @@ -200,7 +200,7 @@ } if (indexedTermsArray == null) { //System.out.println("GET normal enum"); - final Fields fields = reader.fields(); + final InvertedFields fields = reader.fields(); if (fields == null) { return null; } @@ -234,7 +234,7 @@ final int[] lastTerm = new int[maxDoc]; // last term we saw for this document final byte[][] bytes = new byte[maxDoc][]; // list of term numbers for the doc (delta encoded vInts) - final Fields fields = reader.fields(); + final InvertedFields fields = reader.fields(); if (fields == null) { // No terms return; Index: lucene/core/src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/SegmentReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -133,7 +133,7 @@ } @Override - public Fields fields() throws IOException { + public InvertedFields fields() throws IOException { ensureOpen(); return core.fields; } @@ -170,7 +170,7 @@ * @throws IOException */ @Override - public Fields getTermVectors(int docID) throws IOException { + public InvertedFields getTermVectors(int docID) throws IOException { TermVectorsReader termVectorsReader = 
getTermVectorsReader(); if (termVectorsReader == null) { return null; Index: lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/BufferedDeletesStream.java (working copy) @@ -355,7 +355,7 @@ // Delete by Term private synchronized long applyTermDeletes(Iterable termsIter, IndexWriter.ReadersAndLiveDocs rld, SegmentReader reader) throws IOException { long delCount = 0; - Fields fields = reader.fields(); + InvertedFields fields = reader.fields(); if (fields == null) { // This reader has no postings return 0; Index: lucene/core/src/java/org/apache/lucene/index/AtomicReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/AtomicReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/AtomicReader.java (working copy) @@ -19,11 +19,8 @@ import java.io.IOException; -import org.apache.lucene.search.SearcherManager; // javadocs -import org.apache.lucene.store.*; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.ReaderUtil; // for javadocs /** {@code AtomicReader} is an abstract class, providing an interface for accessing an index. Search of an index is done entirely through this abstract interface, @@ -70,15 +67,15 @@ } /** - * Returns {@link Fields} for this reader. + * Returns {@link InvertedFields} for this reader. * This method may return null if the reader has no * postings. */ - public abstract Fields fields() throws IOException; + public abstract InvertedFields fields() throws IOException; @Override public final int docFreq(String field, BytesRef term) throws IOException { - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields == null) { return 0; } @@ -100,7 +97,7 @@ * account deleted documents that have not yet been merged * away. 
*/ public final long totalTermFreq(String field, BytesRef term) throws IOException { - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields == null) { return 0; } @@ -118,7 +115,7 @@ /** This may return null if the field does not exist.*/ public final Terms terms(String field) throws IOException { - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields == null) { return null; } @@ -131,7 +128,7 @@ public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException { assert field != null; assert term != null; - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { @@ -151,7 +148,7 @@ public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException { assert field != null; assert term != null; - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { @@ -172,7 +169,7 @@ public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException { assert state != null; assert field != null; - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { @@ -193,7 +190,7 @@ public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException { assert state != null; assert field != null; - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { @@ -209,7 +206,7 @@ * in this reader. 
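All of the AtomicReader helpers patched above funnel through the same null-checked path from fields() down to a DocsEnum. For orientation only, a sketch of that path after the rename; it is not part of the patch, and atomicReader, the "id" field and the sought term are placeholders.

    InvertedFields fields = atomicReader.fields();                  // null if the reader has no postings
    if (fields != null) {
      Terms terms = fields.terms("id");                             // null if the field does not exist
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);
        if (termsEnum.seekExact(new BytesRef("doc-17"), false)) {   // useCache == false
          DocsEnum docs = termsEnum.docs(atomicReader.getLiveDocs(), null, false);
          int doc = docs.nextDoc();                                 // NO_MORE_DOCS if only deleted docs remain
        }
      }
    }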
*/ public final long getUniqueTermCount() throws IOException { - final Fields fields = fields(); + final InvertedFields fields = fields(); if (fields == null) { return 0; } Index: lucene/core/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -578,7 +578,7 @@ // Test the Term Index segInfoStat.termIndexStatus = testPostings(reader); - // Test Stored Fields + // Test Stored InvertedFields segInfoStat.storedFieldStatus = testStoredFields(info, reader, nf); // Test Term Vectors @@ -710,7 +710,7 @@ } int computedFieldCount = 0; - final Fields fields = reader.fields(); + final InvertedFields fields = reader.fields(); if (fields == null) { msg("OK [no fields/terms]"); return status; @@ -973,7 +973,7 @@ // make sure TermsEnum is empty: final Terms fieldTerms2 = fieldsEnum.terms(); if (fieldTerms2 != null && fieldTerms2.iterator(null).next() != null) { - throw new RuntimeException("Fields.terms(field=" + field + ") returned null yet the field appears to have terms"); + throw new RuntimeException("InvertedFields.terms(field=" + field + ") returned null yet the field appears to have terms"); } } else { if (fieldTerms instanceof BlockTreeTermsReader.FieldReader) { @@ -1255,7 +1255,7 @@ } } - msg("OK [" + status.docCount + " total doc Count; Num DocValues Fields " + msg("OK [" + status.docCount + " total doc Count; Num DocValues InvertedFields " + status.totalValueFields); } catch (Throwable e) { msg("ERROR [" + String.valueOf(e.getMessage()) + "]"); @@ -1292,7 +1292,7 @@ final Bits liveDocs = reader.getLiveDocs(); - final Fields postingsFields; + final InvertedFields postingsFields; // TODO: testTermsIndex if (crossCheckTermVectors) { postingsFields = reader.fields(); @@ -1306,7 +1306,7 @@ for (int j = 0; j < info.docCount; ++j) { if (liveDocs == null || liveDocs.get(j)) { status.docCount++; - Fields tfv = reader.getTermVectors(j); + InvertedFields tfv = reader.getTermVectors(j); if (tfv != null) { int tfvComputedFieldCount = 0; long tfvComputedTermCount = 0; Index: lucene/core/src/java/org/apache/lucene/index/BaseMultiReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/BaseMultiReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/BaseMultiReader.java (working copy) @@ -49,7 +49,7 @@ } @Override - public final Fields getTermVectors(int docID) throws IOException { + public final InvertedFields getTermVectors(int docID) throws IOException { ensureOpen(); final int i = readerIndex(docID); // find segment num return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to segment Index: lucene/core/src/java/org/apache/lucene/index/Terms.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/Terms.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/Terms.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton; /** - * Access to the terms in a specific field. See {@link Fields}. + * Access to the terms in a specific field. See {@link InvertedFields}. 
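Composite readers keep going through the Multi* helpers, as the SlowCompositeReaderWrapper hunk just below shows. A short sketch of the renamed entry points, not from the patch; reader is any, possibly composite, IndexReader and "id" is a placeholder field.

    InvertedFields fields = MultiInvertedFields.getFields(reader);  // merged view over all segments
    Bits liveDocs = MultiInvertedFields.getLiveDocs(reader);        // merged deletes, or null if none
    Terms idTerms = MultiInvertedFields.getTerms(reader, "id");     // convenience lookup for one field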
* @lucene.experimental */ Index: lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java (working copy) @@ -24,15 +24,13 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.ReaderUtil; // javadoc -import org.apache.lucene.index.DirectoryReader; // javadoc -import org.apache.lucene.index.MultiReader; // javadoc /** * This class forces a composite reader (eg a {@link * MultiReader} or {@link DirectoryReader}) to emulate an * atomic reader. This requires implementing the postings * APIs on-the-fly, using the static methods in {@link - * MultiFields}, {@link MultiDocValues}, + * MultiInvertedFields}, {@link MultiDocValues}, * by stepping through the sub-readers to merge fields/terms, * appending docs, etc. * @@ -47,7 +45,7 @@ private final CompositeReader in; private final Map normsCache = new HashMap(); - private final Fields fields; + private final InvertedFields fields; private final Bits liveDocs; /** This method is sugar for getting an {@link AtomicReader} from @@ -66,8 +64,8 @@ public SlowCompositeReaderWrapper(CompositeReader reader) throws IOException { super(); in = reader; - fields = MultiFields.getFields(in); - liveDocs = MultiFields.getLiveDocs(in); + fields = MultiInvertedFields.getFields(in); + liveDocs = MultiInvertedFields.getLiveDocs(in); } @Override @@ -76,7 +74,7 @@ } @Override - public Fields fields() throws IOException { + public InvertedFields fields() throws IOException { ensureOpen(); in.ensureOpen(); // as we cached the fields, we better check the original reader return fields; @@ -100,7 +98,7 @@ } @Override - public Fields getTermVectors(int docID) + public InvertedFields getTermVectors(int docID) throws IOException { ensureOpen(); return in.getTermVectors(docID); @@ -134,7 +132,7 @@ @Override public FieldInfos getFieldInfos() { ensureOpen(); - return MultiFields.getMergedFieldInfos(in); + return MultiInvertedFields.getMergedFieldInfos(in); } @Override Index: lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java (working copy) @@ -334,14 +334,14 @@ private final void mergeTerms(SegmentWriteState segmentWriteState) throws CorruptIndexException, IOException { - final List fields = new ArrayList(); + final List fields = new ArrayList(); final List slices = new ArrayList(); int docBase = 0; for(int readerIndex=0;readerIndex IMPL: FormatPostingsTermsDictWriter - -> TermsConsumer - -> IMPL: FormatPostingsTermsDictWriter.TermsWriter + -> InvertedTermsConsumer + -> IMPL: FormatPostingsTermsDictWriter.InvertedTermsWriter -> DocsConsumer -> IMPL: FormatPostingsDocsWriter -> PositionsConsumer Index: lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (working copy) @@ -26,7 +26,7 @@ import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.PostingsConsumer; import 
org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; @@ -319,7 +319,7 @@ void flush(String fieldName, FieldsConsumer consumer, final SegmentWriteState state) throws CorruptIndexException, IOException { - final TermsConsumer termsConsumer = consumer.addField(fieldInfo); + final InvertedTermsConsumer termsConsumer = consumer.addField(fieldInfo); final Comparator termComp = termsConsumer.getComparator(); // CONFUSING: this.indexOptions holds the index options @@ -382,7 +382,7 @@ // TODO: really TermsHashPerField should take over most // of this loop, including merge sort of terms from // multiple threads and interacting with the - // TermsConsumer, only calling out to us (passing us the + // InvertedTermsConsumer, only calling out to us (passing us the // DocsConsumer) to handle delivery of docs/positions final PostingsConsumer postingsConsumer = termsConsumer.startTerm(text); Index: lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (working copy) @@ -50,7 +50,7 @@ */ public final class ParallelAtomicReader extends AtomicReader { private final FieldInfos fieldInfos = new FieldInfos(); - private final ParallelFields fields = new ParallelFields(); + private final ParallelInvertedFields fields = new ParallelInvertedFields(); private final AtomicReader[] parallelReaders, storedFieldsReaders; private final Set completeReaderSet = Collections.newSetFromMap(new IdentityHashMap()); @@ -126,9 +126,9 @@ private final class ParallelFieldsEnum extends FieldsEnum { private String currentField; private final Iterator keys; - private final Fields fields; + private final InvertedFields fields; - ParallelFieldsEnum(Fields fields) { + ParallelFieldsEnum(InvertedFields fields) { this.fields = fields; keys = fieldToReader.keySet().iterator(); } @@ -151,10 +151,10 @@ } // Single instance of this, per ParallelReader instance - private final class ParallelFields extends Fields { + private final class ParallelInvertedFields extends InvertedFields { final HashMap fields = new HashMap(); - ParallelFields() { + ParallelInvertedFields() { } void addField(String fieldName, Terms terms) throws IOException { @@ -189,7 +189,7 @@ } @Override - public Fields fields() { + public InvertedFields fields() { ensureOpen(); return fields; } @@ -222,9 +222,9 @@ // get all vectors @Override - public Fields getTermVectors(int docID) throws IOException { + public InvertedFields getTermVectors(int docID) throws IOException { ensureOpen(); - ParallelFields fields = new ParallelFields(); + ParallelInvertedFields fields = new ParallelInvertedFields(); for (Map.Entry ent : fieldToReader.entrySet()) { String fieldName = ent.getKey(); Terms vector = ent.getValue().getTermVector(docID, fieldName); Index: lucene/core/src/java/org/apache/lucene/index/DocValues.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/DocValues.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/DocValues.java (working copy) @@ -74,7 +74,7 @@ *

    * {@link Source} instances obtained from this method are closed / released * from the cache once this {@link DocValues} instance is closed by the - * {@link IndexReader}, {@link Fields} or {@link FieldsEnum} the + * {@link IndexReader}, {@link InvertedFields} or {@link FieldsEnum} the * {@link DocValues} was created from. */ public Source getSource() throws IOException { @@ -541,12 +541,12 @@ BYTES_FIXED_STRAIGHT, /** - * A fixed length dereferenced byte[] variant. Fields with + * A fixed length dereferenced byte[] variant. InvertedFields with * this type only store distinct byte values and store an additional offset * pointer per document to dereference the shared byte[]. * Use this type if your documents may share the same byte[]. *

    - * NOTE: Fields of this type will not store values for documents without and + * NOTE: InvertedFields of this type will not store values for documents without and * explicitly provided value. If a documents value is accessed while no * explicit value is stored the returned {@link BytesRef} will be a 0-length * reference. Custom default values must be assigned explicitly. @@ -560,7 +560,7 @@ * disk-resident API might yield performance degradation since no additional * index is used to advance by more than one document value at a time. *

    - * NOTE: Fields of this type will not store values for documents without an + * NOTE: InvertedFields of this type will not store values for documents without an * explicitly provided value. If a documents value is accessed while no * explicit value is stored the returned {@link BytesRef} will be a 0-length * byte[] reference. Custom default values must be assigned explicitly. @@ -573,7 +573,7 @@ * {@link #BYTES_FIXED_DEREF}, but allowing each * document's value to be a different length. *

    - * NOTE: Fields of this type will not store values for documents without and + * NOTE: InvertedFields of this type will not store values for documents without and * explicitly provided value. If a documents value is accessed while no * explicit value is stored the returned {@link BytesRef} will be a 0-length * reference. Custom default values must be assigned explicitly. @@ -587,7 +587,7 @@ * {@link #BYTES_FIXED_SORTED}, but allowing each * document's value to be a different length. *

    - * NOTE: Fields of this type will not store values for documents without and + * NOTE: InvertedFields of this type will not store values for documents without and * explicitly provided value. If a documents value is accessed while no * explicit value is stored the returned {@link BytesRef} will be a 0-length * reference.Custom default values must be assigned explicitly. @@ -598,14 +598,14 @@ BYTES_VAR_SORTED, /** - * A fixed length pre-sorted byte[] variant. Fields with this type only + * A fixed length pre-sorted byte[] variant. InvertedFields with this type only * store distinct byte values and store an additional offset pointer per * document to dereference the shared byte[]. The stored * byte[] is presorted, by default by unsigned byte order, * and allows access via document id, ordinal and by-value. * Use this type if your documents may share the same byte[]. *

    - * NOTE: Fields of this type will not store values for documents without and + * NOTE: InvertedFields of this type will not store values for documents without and * explicitly provided value. If a documents value is accessed while no * explicit value is stored the returned {@link BytesRef} will be a 0-length * reference. Custom default values must be assigned Index: lucene/core/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/IndexReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -26,11 +26,9 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DocumentStoredFieldVisitor; -import org.apache.lucene.search.SearcherManager; // javadocs import org.apache.lucene.store.*; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.ReaderUtil; // for javadocs /** IndexReader is an abstract class, providing an interface for accessing an index. Search of an index is done entirely through this abstract interface, @@ -314,19 +312,19 @@ } /** Retrieve term vectors for this document, or null if - * term vectors were not indexed. The returned Fields + * term vectors were not indexed. The returned InvertedFields * instance acts like a single-document inverted index * (the docID will be 0). */ - public abstract Fields getTermVectors(int docID) + public abstract InvertedFields getTermVectors(int docID) throws IOException; /** Retrieve term vector for this document and field, or * null if term vectors were not indexed. The returned - * Fields instance acts like a single-document inverted + * InvertedFields instance acts like a single-document inverted * index (the docID will be 0). */ public final Terms getTermVector(int docID, String field) throws IOException { - Fields vectors = getTermVectors(docID); + InvertedFields vectors = getTermVectors(docID); if (vectors == null) { return null; } @@ -363,7 +361,7 @@ * requested document is deleted, and therefore asking for a deleted document * may yield unspecified results. Usually this is not required, however you * can test if the doc is deleted by checking the {@link - * Bits} returned from {@link MultiFields#getLiveDocs}. + * Bits} returned from {@link MultiInvertedFields#getLiveDocs}. * * NOTE: only the content of a field is returned, * if that field was stored during indexing. Metadata Index: lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/FilterAtomicReader.java (working copy) @@ -34,12 +34,12 @@ */ public class FilterAtomicReader extends AtomicReader { - /** Base class for filtering {@link Fields} + /** Base class for filtering {@link InvertedFields} * implementations. 
*/ - public static class FilterFields extends Fields { - protected Fields in; + public static class FilterInvertedFields extends InvertedFields { + protected InvertedFields in; - public FilterFields(Fields in) { + public FilterInvertedFields(InvertedFields in) { this.in = in; } @@ -296,7 +296,7 @@ } @Override - public Fields getTermVectors(int docID) + public InvertedFields getTermVectors(int docID) throws IOException { ensureOpen(); return in.getTermVectors(docID); @@ -338,7 +338,7 @@ } @Override - public Fields fields() throws IOException { + public InvertedFields fields() throws IOException { ensureOpen(); return in.fields(); } Index: lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java (working copy) @@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PerDocProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.StoredFieldsReader; @@ -50,7 +50,7 @@ final FieldInfos fieldInfos; - final FieldsProducer fields; + final InvertedFieldsProducer fields; final PerDocProducer perDocProducer; final PerDocProducer norms; @@ -104,7 +104,7 @@ this.termsIndexDivisor = termsIndexDivisor; final PostingsFormat format = codec.postingsFormat(); final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si, fieldInfos, context, termsIndexDivisor); - // Ask codec for its Fields + // Ask codec for its InvertedFields fields = format.fieldsProducer(segmentReadState); assert fields != null; // ask codec for its Norms: Index: lucene/core/src/java/org/apache/lucene/util/TermContext.java =================================================================== --- lucene/core/src/java/org/apache/lucene/util/TermContext.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/util/TermContext.java (working copy) @@ -21,7 +21,7 @@ import java.util.Arrays; import org.apache.lucene.index.AtomicReaderContext; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.Term; @@ -89,7 +89,7 @@ //if (DEBUG) System.out.println("prts.build term=" + term); for (int i = 0; i < leaves.length; i++) { //if (DEBUG) System.out.println(" r=" + leaves[i].reader); - final Fields fields = leaves[i].reader().fields(); + final InvertedFields fields = leaves[i].reader().fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { Index: lucene/core/src/java/org/apache/lucene/document/Field.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/Field.java (revision 1245069) +++ lucene/core/src/java/org/apache/lucene/document/Field.java (working copy) @@ -41,7 +41,7 @@ *

    A field is a section of a Document. Each field has three * parts: name, type andvalue. Values may be text * (String, Reader or pre-analyzed TokenStream), binary - * (byte[]), or numeric (a Number). Fields are optionally stored in the + * (byte[]), or numeric (a Number). InvertedFields are optionally stored in the * index, so that they may be returned with hits on the document. * *

    @@ -152,7 +152,7 @@ throw new IllegalArgumentException("name cannot be null"); } if (type.indexed()) { - throw new IllegalArgumentException("Fields with BytesRef values cannot be indexed"); + throw new IllegalArgumentException("InvertedFields with BytesRef values cannot be indexed"); } this.fieldsData = bytes; this.type = type; @@ -477,7 +477,7 @@ if (!fieldType().tokenized()) { if (stringValue() == null) { - throw new IllegalArgumentException("Non-Tokenized Fields must have a String value"); + throw new IllegalArgumentException("Non-Tokenized InvertedFields must have a String value"); } return new TokenStream() { Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1245069) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy) @@ -71,7 +71,7 @@ assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error Document doc = ir.document(0); assertEquals("0", doc.get("id")); - TermsEnum te = MultiFields.getTerms(ir, "id").iterator(null); + TermsEnum te = MultiInvertedFields.getTerms(ir, "id").iterator(null); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1"))); assertNotSame("1", te.term().utf8ToString()); ir.close(); @@ -79,7 +79,7 @@ assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); doc = ir.document(0); assertEquals("1", doc.get("id")); - te = MultiFields.getTerms(ir, "id").iterator(null); + te = MultiInvertedFields.getTerms(ir, "id").iterator(null); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("0"))); assertNotSame("0", te.term().utf8ToString()); @@ -89,7 +89,7 @@ doc = ir.document(0); assertEquals("2", doc.get("id")); - te = MultiFields.getTerms(ir, "id").iterator(null); + te = MultiInvertedFields.getTerms(ir, "id").iterator(null); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1"))); assertNotSame("1", te.term()); @@ -129,7 +129,7 @@ doc = ir.document(0); assertEquals(start + "", doc.get("id")); // make sure the deleted doc is not here - TermsEnum te = MultiFields.getTerms(ir, "id").iterator(null); + TermsEnum te = MultiInvertedFields.getTerms(ir, "id").iterator(null); Term t = new Term("id", (NUM_DOCS - 1) + ""); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(t.text()))); assertNotSame(t.text(), te.term().utf8ToString()); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java (revision 1245069) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java (working copy) @@ -91,7 +91,7 @@ } private void checkContents(IndexReader ir, String indexname) throws Exception { - final Bits liveDocs = MultiFields.getLiveDocs(ir); + final Bits liveDocs = MultiInvertedFields.getLiveDocs(ir); for (int i = 0; i < ir.maxDoc(); i++) { if (liveDocs == null || liveDocs.get(i)) { assertEquals(indexname, ir.document(i).get("indexname")); Index: lucene/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java (revision 1245069) +++ lucene/contrib/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java (working copy) @@ 
-19,8 +19,8 @@ import org.apache.lucene.index.AtomicReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.Terms; @@ -115,7 +115,7 @@ TermStatsQueue tiq = null; if (field != null) { - Fields fields = MultiFields.getFields(reader); + InvertedFields fields = MultiInvertedFields.getFields(reader); if (fields == null) { throw new RuntimeException("field " + field + " not found"); } @@ -126,7 +126,7 @@ fillQueue(termsEnum, tiq, field); } } else { - Fields fields = MultiFields.getFields(reader); + InvertedFields fields = MultiInvertedFields.getFields(reader); if (fields == null) { throw new RuntimeException("no fields found for this index"); } Index: lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java =================================================================== --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (revision 1245069) +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (working copy) @@ -137,7 +137,7 @@ DocsEnum td = _TestUtil.docs(random, reader, KEY_FIELD, new BytesRef(url), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); @@ -161,7 +161,7 @@ DocsEnum td = _TestUtil.docs(random, reader, KEY_FIELD, new BytesRef(url), - MultiFields.getLiveDocs(reader), + MultiInvertedFields.getLiveDocs(reader), null, false); Index: lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java =================================================================== --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java (revision 1245069) +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java (working copy) @@ -17,10 +17,10 @@ * limitations under the License. 
*/ +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -81,7 +81,7 @@ } public void testMatchAll() throws Exception { - Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN); + Terms terms = MultiInvertedFields.getTerms(searcher.getIndexReader(), FN); TermsEnum te = new RegexQuery(new Term(FN, "jum.")).getTermsEnum(terms, new AttributeSource() /*dummy*/); // no term should match assertNull(te.next()); Index: lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java =================================================================== --- lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (revision 1245069) +++ lucene/contrib/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (working copy) @@ -28,7 +28,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.TFIDFSimilarity; @@ -211,7 +211,7 @@ AttributeSource atts = new AttributeSource(); MaxNonCompetitiveBoostAttribute maxBoostAtt = atts.addAttribute(MaxNonCompetitiveBoostAttribute.class); - FuzzyTermsEnum fe = new FuzzyTermsEnum(MultiFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength, false); + FuzzyTermsEnum fe = new FuzzyTermsEnum(MultiInvertedFields.getTerms(reader, startTerm.field()), atts, startTerm, f.minSimilarity, f.prefixLength, false); //store the df so all variants use same idf int df = reader.docFreq(startTerm); int numVariants=0; Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 1245069) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy) @@ -33,7 +33,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.index.DocsAndPositionsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -70,7 +70,7 @@ String field, Document doc, Analyzer analyzer) throws IOException { TokenStream ts = null; - Fields vectors = reader.getTermVectors(docId); + InvertedFields vectors = reader.getTermVectors(docId); if (vectors != null) { Terms vector = vectors.terms(field); if (vector != null) { @@ -102,7 +102,7 @@ String field, Analyzer analyzer) throws IOException { TokenStream ts = null; - Fields vectors = reader.getTermVectors(docId); + InvertedFields vectors = reader.getTermVectors(docId); if (vectors != null) { Terms vector = vectors.terms(field); if (vector != null) { @@ -275,7 +275,7 @@ public static TokenStream 
getTokenStream(IndexReader reader, int docId, String field) throws IOException { - Fields vectors = reader.getTermVectors(docId); + InvertedFields vectors = reader.getTermVectors(docId); if (vectors == null) { throw new IllegalArgumentException(field + " in doc #" + docId + "does not have any term position data stored"); Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (revision 1245069) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (working copy) @@ -22,7 +22,7 @@ import java.util.Set; import org.apache.lucene.index.DocsAndPositionsEnum; -import org.apache.lucene.index.Fields; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -78,7 +78,7 @@ // just return to make null snippet if un-matched fieldName specified when fieldMatch == true if( termSet == null ) return; - final Fields vectors = reader.getTermVectors(docId); + final InvertedFields vectors = reader.getTermVectors(docId); if (vectors == null) { // null snippet return; Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java =================================================================== --- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1245069) +++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy) @@ -35,15 +35,14 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.AtomicReader; import org.apache.lucene.index.AtomicReaderContext; +import org.apache.lucene.index.InvertedFields; import org.apache.lucene.index.Norm; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.Fields; import org.apache.lucene.index.FieldsEnum; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.OrdTermState; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Term; @@ -775,7 +774,7 @@ return fieldInfos; } - private class MemoryFields extends Fields { + private class MemoryInvertedFields extends InvertedFields { @Override public FieldsEnum iterator() { return new FieldsEnum() { @@ -792,7 +791,7 @@ @Override public Terms terms() { - return MemoryFields.this.terms(sortedFields[upto].getKey()); + return MemoryInvertedFields.this.terms(sortedFields[upto].getKey()); } }; } @@ -850,9 +849,9 @@ } @Override - public Fields fields() { + public InvertedFields fields() { sortFields(); - return new MemoryFields(); + return new MemoryInvertedFields(); } private class MemoryTermsEnum extends TermsEnum { @@ -1076,7 +1075,7 @@ } @Override - public Fields getTermVectors(int docID) { + public InvertedFields getTermVectors(int docID) { if (docID == 0) { return fields(); } else { Index: lucene/MIGRATE.txt =================================================================== --- lucene/MIGRATE.txt (revision 1245069) +++ lucene/MIGRATE.txt (working copy) @@ -271,7 +271,7 @@ Instead of IndexReader.isDeleted, do this: import org.apache.lucene.util.Bits; - import 
org.apache.lucene.index.MultiFields; + import org.apache.lucene.index.MultiInvertedFields; - Bits delDocs = MultiFields.getDeletedDocs(indexReader); + Bits delDocs = MultiInvertedFields.getDeletedDocs(indexReader); if (delDocs.get(docID)) { Index: lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockFixedIntBlockPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTermsReader; import org.apache.lucene.codecs.BlockTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.FixedGapTermsIndexReader; import org.apache.lucene.codecs.FixedGapTermsIndexWriter; import org.apache.lucene.codecs.PostingsFormat; @@ -156,7 +156,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postingsReader = new SepPostingsReader(state.dir, state.segmentInfo, state.context, @@ -180,7 +180,7 @@ success = false; try { - FieldsProducer ret = new BlockTermsReader(indexReader, + InvertedFieldsProducer ret = new BlockTermsReader(indexReader, state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/mockintblock/MockVariableIntBlockPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTermsReader; import org.apache.lucene.codecs.BlockTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.FixedGapTermsIndexReader; import org.apache.lucene.codecs.FixedGapTermsIndexWriter; import org.apache.lucene.codecs.PostingsFormat; @@ -179,7 +179,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postingsReader = new SepPostingsReader(state.dir, state.segmentInfo, state.context, @@ -203,7 +203,7 @@ success = false; try { - FieldsProducer ret = new BlockTermsReader(indexReader, + InvertedFieldsProducer ret = new BlockTermsReader(indexReader, state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (working copy) @@ -30,11 +30,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.codecs.FieldsConsumer; -import
org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsConsumer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -100,7 +100,7 @@ } // Postings state: - static class RAMPostings extends FieldsProducer { + static class RAMPostings extends InvertedFieldsProducer { final Map fieldToTerms = new TreeMap(); @Override @@ -189,14 +189,14 @@ private static class RAMFieldsConsumer extends FieldsConsumer { private final RAMPostings postings; - private final RAMTermsConsumer termsConsumer = new RAMTermsConsumer(); + private final RAMInvertedTermsConsumer termsConsumer = new RAMInvertedTermsConsumer(); public RAMFieldsConsumer(RAMPostings postings) { this.postings = postings; } @Override - public TermsConsumer addField(FieldInfo field) { + public InvertedTermsConsumer addField(FieldInfo field) { if (field.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) { throw new UnsupportedOperationException("this codec cannot index offsets"); } @@ -212,7 +212,7 @@ } } - private static class RAMTermsConsumer extends TermsConsumer { + private static class RAMInvertedTermsConsumer extends InvertedTermsConsumer { private RAMField field; private final RAMPostingsWriterImpl postingsWriter = new RAMPostingsWriterImpl(); RAMTerm current; @@ -565,7 +565,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState readState) + public InvertedFieldsProducer fieldsProducer(SegmentReadState readState) throws IOException { // Load our ID: Index: lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/mocksep/MockSepPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTermsReader; import org.apache.lucene.codecs.BlockTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.FixedGapTermsIndexReader; import org.apache.lucene.codecs.FixedGapTermsIndexWriter; import org.apache.lucene.codecs.PostingsFormat; @@ -84,7 +84,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postingsReader = new SepPostingsReader(state.dir, state.segmentInfo, state.context, new MockSingleIntFactory(), state.segmentSuffix); @@ -107,7 +107,7 @@ success = false; try { - FieldsProducer ret = new BlockTermsReader(indexReader, + InvertedFieldsProducer ret = new BlockTermsReader(indexReader, state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java =================================================================== ---
lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/nestedpulsing/NestedPulsingPostingsFormat.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTreeTermsReader; import org.apache.lucene.codecs.BlockTreeTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsReaderBase; import org.apache.lucene.codecs.PostingsWriterBase; @@ -69,13 +69,13 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase docsReader = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix); PostingsReaderBase pulsingReaderInner = new PulsingPostingsReader(docsReader); PostingsReaderBase pulsingReader = new PulsingPostingsReader(pulsingReaderInner); boolean success = false; try { - FieldsProducer ret = new BlockTreeTermsReader( + InvertedFieldsProducer ret = new BlockTreeTermsReader( state.dir, state.fieldInfos, state.segmentInfo.name, pulsingReader, state.context, Index: lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWFieldsWriter.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWFieldsWriter.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWFieldsWriter.java (working copy) @@ -21,9 +21,9 @@ import java.util.Comparator; import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.InvertedTermsConsumer; import org.apache.lucene.codecs.PostingsConsumer; import org.apache.lucene.codecs.TermStats; -import org.apache.lucene.codecs.TermsConsumer; import org.apache.lucene.codecs.lucene40.Lucene40SkipListWriter; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.FieldInfo; @@ -84,7 +84,7 @@ } @Override - public TermsConsumer addField(FieldInfo field) throws IOException { + public InvertedTermsConsumer addField(FieldInfo field) throws IOException { assert field.number != -1; if (field.indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) { throw new UnsupportedOperationException("this codec cannot index offsets"); @@ -98,7 +98,7 @@ IOUtils.close(termsOut, freqOut, proxOut); } - private class PreFlexTermsWriter extends TermsConsumer { + private class PreFlexTermsWriter extends InvertedTermsConsumer { private final FieldInfo fieldInfo; private final boolean omitTF; private final boolean storePayloads; Index: lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWPostingsFormat.java (working copy) @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import
org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.util.LuceneTestCase; @@ -43,7 +43,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { // Whenever IW opens readers, eg for merging, we have to // keep terms order in UTF16: Index: lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40ords/Lucene40WithOrds.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40ords/Lucene40WithOrds.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40ords/Lucene40WithOrds.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.BlockTermsReader; import org.apache.lucene.codecs.BlockTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.FixedGapTermsIndexReader; import org.apache.lucene.codecs.FixedGapTermsIndexWriter; import org.apache.lucene.codecs.PostingsFormat; @@ -87,7 +87,7 @@ public final static int TERMS_CACHE_SIZE = 1024; @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { PostingsReaderBase postings = new Lucene40PostingsReader(state.dir, state.segmentInfo, state.context, state.segmentSuffix); TermsIndexReaderBase indexReader; @@ -108,7 +108,7 @@ success = false; try { - FieldsProducer ret = new BlockTermsReader(indexReader, + InvertedFieldsProducer ret = new BlockTermsReader(indexReader, state.dir, state.fieldInfos, state.segmentInfo.name, Index: lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.codecs.BlockTreeTermsReader; import org.apache.lucene.codecs.BlockTreeTermsWriter; import org.apache.lucene.codecs.FieldsConsumer; -import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.InvertedFieldsProducer; import org.apache.lucene.codecs.FixedGapTermsIndexReader; import org.apache.lucene.codecs.FixedGapTermsIndexWriter; import org.apache.lucene.codecs.PostingsFormat; @@ -270,7 +270,7 @@ } @Override - public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + public InvertedFieldsProducer fieldsProducer(SegmentReadState state) throws IOException { final String seedFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, SEED_EXT); final IndexInput in = state.dir.openInput(seedFileName, state.context); @@ -310,7 +310,7 @@ postingsReader = new PulsingPostingsReader(postingsReader); } - final FieldsProducer fields; + final InvertedFieldsProducer fields; if (random.nextBoolean()) { // Use BlockTree terms dict Index: lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java =================================================================== ---
lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java (working copy) @@ -51,7 +51,7 @@ public static final FieldType customType2; public static final String FIELD_2_TEXT = "field field field two text"; - //Fields will be lexicographically sorted. So, the order is: field, text, two + //InvertedFields will be lexicographically sorted. So, the order is: field, text, two public static final int [] FIELD_2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_2_KEY = "textField2"; public static Field textField2; @@ -145,7 +145,7 @@ public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, customType); public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text"; - //Fields will be lexicographically sorted. So, the order is: field, text, two + //InvertedFields will be lexicographically sorted. So, the order is: field, text, two public static final int [] FIELD_UTF2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8"; public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, customType2); Index: lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (working copy) @@ -340,7 +340,7 @@ try { if (s.getIndexReader().numDocs() > 0) { smokeTestSearcher(s); - Fields fields = MultiFields.getFields(s.getIndexReader()); + InvertedFields fields = MultiInvertedFields.getFields(s.getIndexReader()); if (fields == null) { continue; } Index: lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java (revision 1245069) +++ lucene/test-framework/src/java/org/apache/lucene/util/_TestUtil.java (working copy) @@ -50,7 +50,7 @@ import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiInvertedFields; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TieredMergePolicy; @@ -659,7 +659,7 @@ // DocsAndFreqsEnum, DocsAndPositionsEnum. Returns null // if field/term doesn't exist: public static DocsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, DocsEnum reuse, boolean needsFreqs) throws IOException { - final Terms terms = MultiFields.getTerms(r, field); + final Terms terms = MultiInvertedFields.getTerms(r, field); if (terms == null) { return null; }