Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java =================================================================== --- lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (revision 1037295) +++ lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (working copy) @@ -20,7 +20,6 @@ import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; -import java.io.FileReader; import java.io.IOException; import java.io.InputStreamReader; import java.util.Date; @@ -28,7 +27,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.FilterIndexReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.Collector; @@ -44,31 +42,12 @@ /** Simple command-line based search demo. */ public class SearchFiles { - /** Use the norms from one field for all fields. Norms are read into memory, - * using a byte of memory per document per searched field. This can cause - * search of large collections with a large number of fields to run out of - * memory. If all of the fields contain only a single token, then the norms - * are all identical, then single norm vector may be shared. */ - private static class OneNormsReader extends FilterIndexReader { - private String field; - - public OneNormsReader(IndexReader in, String field) { - super(in); - this.field = field; - } - - @Override - public byte[] norms(String field) throws IOException { - return in.norms(this.field); - } - } - private SearchFiles() {} /** Simple command-line based search demo. 
*/ public static void main(String[] args) throws Exception { String usage = - "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-norms field] [-paging hitsPerPage]"; + "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-paging hitsPerPage]"; usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search."; if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) { System.out.println(usage); @@ -80,7 +59,6 @@ String queries = null; int repeat = 0; boolean raw = false; - String normsField = null; boolean paging = true; int hitsPerPage = 10; @@ -99,9 +77,6 @@ i++; } else if ("-raw".equals(args[i])) { raw = true; - } else if ("-norms".equals(args[i])) { - normsField = args[i+1]; - i++; } else if ("-paging".equals(args[i])) { if (args[i+1].equals("false")) { paging = false; @@ -117,9 +92,6 @@ IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true - if (normsField != null) - reader = new OneNormsReader(reader, normsField); - Searcher searcher = new IndexSearcher(reader); Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT); Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 1037295) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy) @@ -30,6 +30,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiNorms; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Fields; @@ -211,12 
+212,11 @@ } } - - // create norms for (String fieldName : allFieldNames) { if (fields == null || fields.contains(fieldName)) { - getNormsByFieldNameAndDocumentNumber().put(fieldName, sourceIndexReader.norms(fieldName)); + byte norms[] = MultiNorms.norms(sourceIndexReader, fieldName); + getNormsByFieldNameAndDocumentNumber().put(fieldName, norms); } } Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java (revision 1037295) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiNorms; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -69,8 +70,7 @@ } private void testNorms(IndexReader r) throws IOException { - byte[] norms; - norms = r.norms("foo"); + byte[] norms = MultiNorms.norms(r, "foo"); if (norms != null) { assertEquals(0, norms.length); norms = new byte[10]; Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 1037295) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy) @@ -30,6 +30,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.MultiNorms; import org.apache.lucene.index.Payload; import org.apache.lucene.index.Term; import 
org.apache.lucene.index.DocsEnum; @@ -341,8 +342,8 @@ // test norms as used by normal use - byte[] aprioriNorms = aprioriReader.norms((String) field); - byte[] testNorms = testReader.norms((String) field); + byte[] aprioriNorms = MultiNorms.norms(aprioriReader, (String) field); + byte[] testNorms = MultiNorms.norms(testReader, (String) field); if (aprioriNorms != null) { assertEquals(aprioriNorms.length, testNorms.length); @@ -354,10 +355,10 @@ // test norms as used by multireader aprioriNorms = new byte[aprioriReader.maxDoc()]; - aprioriReader.norms((String) field, aprioriNorms, 0); + MultiNorms.norms(aprioriReader, (String) field, aprioriNorms, 0); testNorms = new byte[testReader.maxDoc()]; - testReader.norms((String) field, testNorms, 0); + MultiNorms.norms(testReader, (String) field, testNorms, 0); assertEquals(aprioriNorms.length, testNorms.length); @@ -369,10 +370,10 @@ // test norms as used by multireader aprioriNorms = new byte[aprioriReader.maxDoc() + 10]; - aprioriReader.norms((String) field, aprioriNorms, 10); + MultiNorms.norms(aprioriReader, (String) field, aprioriNorms, 10); testNorms = new byte[testReader.maxDoc() + 10]; - testReader.norms((String) field, testNorms, 10); + MultiNorms.norms(testReader, (String) field, testNorms, 10); assertEquals(aprioriNorms.length, testNorms.length); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 1037295) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy) @@ -84,7 +84,7 @@ public void testFieldWithNoNorm() throws Exception { IndexReader r = IndexReader.open(store, false); - byte[] norms = r.norms("nonorm"); + byte[] norms = MultiNorms.norms(r, "nonorm"); // sanity check, norms should all be 1 assertTrue("Whoops we have norms?", !r.hasNorms("nonorm")); @@ -98,7 +98,7 @@ 
// nothing should have changed r = IndexReader.open(store, false); - norms = r.norms("nonorm"); + norms = MultiNorms.norms(r, "nonorm"); assertTrue("Whoops we have norms?", !r.hasNorms("nonorm")); assertNull(norms); @@ -183,14 +183,14 @@ public void testNormKiller() throws IOException { IndexReader r = IndexReader.open(store, false); - byte[] oldNorms = r.norms("untokfield"); + byte[] oldNorms = MultiNorms.norms(r, "untokfield"); r.close(); FieldNormModifier fnm = new FieldNormModifier(store, s); fnm.reSetNorms("untokfield"); r = IndexReader.open(store, false); - byte[] newNorms = r.norms("untokfield"); + byte[] newNorms = MultiNorms.norms(r, "untokfield"); r.close(); assertFalse(Arrays.equals(oldNorms, newNorms)); Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 1037295) +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.FieldNormModifier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.MultiNorms; import org.apache.lucene.index.Term; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DefaultSimilarity; @@ -94,7 +95,7 @@ public void testFieldWithNoNorm() throws Exception { IndexReader r = IndexReader.open(store, false); - byte[] norms = r.norms("nonorm"); + byte[] norms = MultiNorms.norms(r, "nonorm"); // sanity check, norms should all be 1 assertTrue("Whoops we have norms?", !r.hasNorms("nonorm")); @@ -112,7 +113,7 @@ // nothing should have changed r = IndexReader.open(store, false); - norms = r.norms("nonorm"); + norms = MultiNorms.norms(r, "nonorm"); assertTrue("Whoops we have norms?", !r.hasNorms("nonorm")); assertNull(norms); Index: 
lucene/src/java/org/apache/lucene/index/DirectoryReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/DirectoryReader.java (revision 1037295) +++ lucene/src/java/org/apache/lucene/index/DirectoryReader.java (working copy) @@ -20,7 +20,6 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -31,7 +30,6 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.search.Similarity; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; @@ -65,7 +63,6 @@ private SegmentReader[] subReaders; private int[] starts; // 1st docno for each segment private final Map subReaderToSlice = new HashMap(); - private Map normsCache = new HashMap(); private int maxDoc = 0; private int numDocs = -1; private boolean hasDeletions = false; @@ -189,7 +186,7 @@ /** This constructor is only used for {@link #reopen()} */ DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, - Map oldNormsCache, boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs) throws IOException { + boolean readOnly, boolean doClone, int termInfosIndexDivisor, CodecProvider codecs) throws IOException { this.directory = directory; this.readOnly = readOnly; this.segmentInfos = infos; @@ -277,38 +274,6 @@ // initialize the readers to calculate maxDoc before we try to reuse the old normsCache initialize(newReaders); - - // try to copy unchanged norms from the old normsCache to the new one - if (oldNormsCache != null) { - for (Map.Entry entry: oldNormsCache.entrySet()) { - String field = entry.getKey(); - if (!hasNorms(field)) { - continue; - } - - byte[] oldBytes = entry.getValue(); - 
- byte[] bytes = new byte[maxDoc()]; - - for (int i = 0; i < subReaders.length; i++) { - Integer oldReaderIndex = segmentReaders.get(subReaders[i].getSegmentName()); - - // this SegmentReader was not re-opened, we can copy all of its norms - if (oldReaderIndex != null && - (oldReaders[oldReaderIndex.intValue()] == subReaders[i] - || oldReaders[oldReaderIndex.intValue()].norms.get(field) == subReaders[i].norms.get(field))) { - // we don't have to synchronize here: either this constructor is called from a SegmentReader, - // in which case no old norms cache is present, or it is called from MultiReader.reopen(), - // which is synchronized - System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i], starts[i+1] - starts[i]); - } else { - subReaders[i].norms(field, bytes, starts[i]); - } - } - - normsCache.put(field, bytes); // update cache - } - } } /** {@inheritDoc} */ @@ -500,7 +465,7 @@ private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException { DirectoryReader reader; - reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, openReadOnly, doClone, termInfosIndexDivisor, codecs); + reader = new DirectoryReader(directory, infos, subReaders, starts, openReadOnly, doClone, termInfosIndexDivisor, codecs); return reader; } @@ -640,41 +605,18 @@ @Override public synchronized byte[] norms(String field) throws IOException { ensureOpen(); - byte[] bytes = normsCache.get(field); - if (bytes != null) - return bytes; // cache hit - if (!hasNorms(field)) - return null; - - bytes = new byte[maxDoc()]; - for (int i = 0; i < subReaders.length; i++) - subReaders[i].norms(field, bytes, starts[i]); - normsCache.put(field, bytes); // update cache - return bytes; + throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms"); } @Override public 
synchronized void norms(String field, byte[] result, int offset) throws IOException { - ensureOpen(); - byte[] bytes = normsCache.get(field); - if (bytes==null && !hasNorms(field)) { - Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f)); - } else if (bytes != null) { // cache hit - System.arraycopy(bytes, 0, result, offset, maxDoc()); - } else { - for (int i = 0; i < subReaders.length; i++) { // read from segments - subReaders[i].norms(field, result, offset + starts[i]); - } - } + throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms"); } @Override protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException { - synchronized (normsCache) { - normsCache.remove(field); // clear cache - } int i = readerIndex(n); // find segment num subReaders[i].setNorm(n-starts[i], field, value); // dispatch } @@ -867,7 +809,6 @@ @Override protected synchronized void doClose() throws IOException { IOException ioe = null; - normsCache = null; for (int i = 0; i < subReaders.length; i++) { // try to close each reader, even if an exception is thrown try { Index: lucene/src/java/org/apache/lucene/index/MultiNorms.java =================================================================== --- lucene/src/java/org/apache/lucene/index/MultiNorms.java (revision 0) +++ lucene/src/java/org/apache/lucene/index/MultiNorms.java (revision 0) @@ -0,0 +1,86 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.lucene.search.Similarity; +import org.apache.lucene.util.ReaderUtil; + +/** + * Exposes merged norms, computed from the norms of sub-segments. + * This is useful when you're interacting with an {@link + * IndexReader} implementation that consists of sequential + * sub-readers (eg DirectoryReader or {@link + * MultiReader}). + * + *

NOTE: for multi readers, you'll get better + * performance by gathering the sub readers using {@link + * ReaderUtil#gatherSubReaders} and then operate per-reader, + * instead of using this class. + * + * @lucene.experimental + */ +public class MultiNorms { + /** + * Warning: this is heavy! Do not use in a loop, or implement norms() + * in your own reader with this (you should likely cache the result). + */ + public static byte[] norms(IndexReader r, String field) throws IOException { + final IndexReader[] subs = r.getSequentialSubReaders(); + if (subs == null) { + // already an atomic reader + return r.norms(field); + } else if (subs.length == 0 || !r.hasNorms(field)) { + // no norms + return null; + } else if (subs.length == 1) { + return norms(subs[0], field); + } else { + // TODO: optimize more maybe + byte norms[] = new byte[r.maxDoc()]; + final List leaves = new ArrayList(); + ReaderUtil.gatherSubReaders(leaves, r); + int end = 0; + for (IndexReader leaf : leaves) { + int start = end; + leaf.norms(field, norms, start); + end += leaf.maxDoc(); + } + return norms; + } + } + + /** + * Warning: this is heavy! Do not use in a loop, or implement norms() + * in your own reader with this (you should likely cache the result). 
+ */ + public static void norms(IndexReader r, String field, byte[] bytes, int offset) + throws IOException { + // TODO: optimize more maybe + byte[] norms = norms(r, field); + if (norms == null) { + Arrays.fill(bytes, offset, bytes.length, Similarity.getDefault().encodeNormValue(1.0f)); + } else { + System.arraycopy(norms, 0, bytes, offset, r.maxDoc()); + } + } +} Property changes on: lucene\src\java\org\apache\lucene\index\MultiNorms.java ___________________________________________________________________ Added: svn:keywords + Date Author Id Revision HeadURL Added: svn:eol-style + native Index: lucene/src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/MultiReader.java (revision 1037295) +++ lucene/src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -18,14 +18,12 @@ */ import java.io.IOException; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.search.Similarity; import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -38,7 +36,6 @@ private int[] starts; // 1st docno for each segment private final Map subReaderToSlice = new HashMap(); private boolean[] decrefOnClose; // remember which subreaders to decRef on close - private Map normsCache = new HashMap(); private int maxDoc = 0; private int numDocs = -1; private boolean hasDeletions = false; @@ -316,45 +313,18 @@ @Override public synchronized byte[] norms(String field) throws IOException { - ensureOpen(); - byte[] bytes = normsCache.get(field); - if (bytes != null) - return bytes; // cache hit - if (!hasNorms(field)) - return null; - - bytes = new byte[maxDoc()]; - for (int i = 0; i < 
subReaders.length; i++) - subReaders[i].norms(field, bytes, starts[i]); - normsCache.put(field, bytes); // update cache - return bytes; + throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms"); } @Override public synchronized void norms(String field, byte[] result, int offset) throws IOException { - ensureOpen(); - byte[] bytes = normsCache.get(field); - for (int i = 0; i < subReaders.length; i++) // read from segments - subReaders[i].norms(field, result, offset + starts[i]); - - if (bytes==null && !hasNorms(field)) { - Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f)); - } else if (bytes != null) { // cache hit - System.arraycopy(bytes, 0, result, offset, maxDoc()); - } else { - for (int i = 0; i < subReaders.length; i++) { // read from segments - subReaders[i].norms(field, result, offset + starts[i]); - } - } + throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms"); } @Override protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException { - synchronized (normsCache) { - normsCache.remove(field); // clear cache - } int i = readerIndex(n); // find segment num subReaders[i].setNorm(n-starts[i], field, value); // dispatch } Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/ParallelReader.java (revision 1037295) +++ lucene/src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -23,6 +23,7 @@ import org.apache.lucene.document.Fieldable; import org.apache.lucene.util.Bits; import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close +import org.apache.lucene.search.Similarity; import 
org.apache.lucene.util.BytesRef; import java.io.IOException; @@ -53,7 +54,8 @@ private SortedMap fieldToReader = new TreeMap(); private Map> readerToFields = new HashMap>(); private List storedFieldReaders = new ArrayList(); - + private Map normsCache = new HashMap(); + private int maxDoc; private int numDocs; private boolean hasDeletions; @@ -141,6 +143,9 @@ reader.incRef(); } decrefOnClose.add(Boolean.valueOf(incRefReaders)); + synchronized(normsCache) { + normsCache.clear(); // TODO: don't need to clear this for all fields really? + } } private class ParallelFieldsEnum extends FieldsEnum { @@ -278,6 +283,7 @@ if (reopened) { List newDecrefOnClose = new ArrayList(); + // TODO: maybe add a special reopen-ctor for norm-copying? ParallelReader pr = new ParallelReader(); for (int i = 0; i < readers.size(); i++) { IndexReader oldReader = readers.get(i); @@ -419,27 +425,51 @@ } @Override - public byte[] norms(String field) throws IOException { + public synchronized byte[] norms(String field) throws IOException { ensureOpen(); IndexReader reader = fieldToReader.get(field); - return reader==null ? 
null : reader.norms(field); + + if (reader==null) + return null; + + byte[] bytes = normsCache.get(field); + if (bytes != null) + return bytes; + if (!hasNorms(field)) + return null; + + bytes = MultiNorms.norms(reader, field); + normsCache.put(field, bytes); + return bytes; } @Override - public void norms(String field, byte[] result, int offset) + public synchronized void norms(String field, byte[] result, int offset) throws IOException { + // TODO: maybe optimize ensureOpen(); IndexReader reader = fieldToReader.get(field); - if (reader!=null) - reader.norms(field, result, offset); + if (reader==null) + return; + + byte[] norms = norms(field); + if (norms == null) { + Arrays.fill(result, offset, result.length, Similarity.getDefault().encodeNormValue(1.0f)); + } else { + System.arraycopy(norms, 0, result, offset, maxDoc()); + } } @Override protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException { IndexReader reader = fieldToReader.get(field); - if (reader!=null) + if (reader!=null) { + synchronized(normsCache) { + normsCache.remove(field); + } reader.doSetNorm(n, field, value); + } } @Override Index: lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (revision 1037295) +++ lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (working copy) @@ -18,6 +18,13 @@ */ import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; + +import org.apache.lucene.search.Similarity; import org.apache.lucene.util.Bits; import org.apache.lucene.util.ReaderUtil; // javadoc @@ -48,6 +55,8 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader { + private final Map normsCache = new HashMap(); + public SlowMultiReaderWrapper(IndexReader other) { super(other); } @@ 
-62,9 +71,44 @@ return MultiFields.getDeletedDocs(in); } + @Override public IndexReader[] getSequentialSubReaders() { return null; } + + @Override + public synchronized byte[] norms(String field) throws IOException { + ensureOpen(); + byte[] bytes = normsCache.get(field); + if (bytes != null) + return bytes; + if (!hasNorms(field)) + return null; + + bytes = MultiNorms.norms(in, field); + normsCache.put(field, bytes); + return bytes; + } + + @Override + public synchronized void norms(String field, byte[] bytes, int offset) throws IOException { + // TODO: maybe optimize + ensureOpen(); + byte[] norms = norms(field); + if (norms == null) { + Arrays.fill(bytes, offset, bytes.length, Similarity.getDefault().encodeNormValue(1.0f)); + } else { + System.arraycopy(norms, 0, bytes, offset, maxDoc()); + } + } + @Override + protected void doSetNorm(int n, String field, byte value) + throws CorruptIndexException, IOException { + synchronized(normsCache) { + normsCache.remove(field); + } + in.doSetNorm(n, field, value); + } } Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision 1037295) +++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java (working copy) @@ -1336,8 +1336,8 @@ it1 = fields1.iterator(); while (it1.hasNext()) { String curField = it1.next(); - byte[] norms1 = index1.norms(curField); - byte[] norms2 = index2.norms(curField); + byte[] norms1 = MultiNorms.norms(index1, curField); + byte[] norms2 = MultiNorms.norms(index2, curField); if (norms1 != null && norms2 != null) { assertEquals(norms1.length, norms2.length); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 1037295) +++ 
lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy) @@ -272,13 +272,13 @@ * @throws Exception */ private void performDefaultTests(IndexReader r1) throws Exception { - float norm1 = Similarity.getDefault().decodeNormValue(r1.norms("field1")[4]); + float norm1 = Similarity.getDefault().decodeNormValue(MultiNorms.norms(r1, "field1")[4]); IndexReader pr1Clone = (IndexReader) r1.clone(); pr1Clone.deleteDocument(10); pr1Clone.setNorm(4, "field1", 0.5f); - assertTrue(Similarity.getDefault().decodeNormValue(r1.norms("field1")[4]) == norm1); - assertTrue(Similarity.getDefault().decodeNormValue(pr1Clone.norms("field1")[4]) != norm1); + assertTrue(Similarity.getDefault().decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1); + assertTrue(Similarity.getDefault().decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1); final Bits delDocs = MultiFields.getDeletedDocs(r1); assertTrue(delDocs == null || !delDocs.get(10)); @@ -428,7 +428,7 @@ IndexReader orig = IndexReader.open(dir1, false); orig.setNorm(1, "field1", 17.0f); final byte encoded = Similarity.getDefault().encodeNormValue(17.0f); - assertEquals(encoded, orig.norms("field1")[1]); + assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]); // the cloned segmentreader should have 2 references, 1 to itself, and 1 to // the original segmentreader @@ -437,7 +437,7 @@ clonedReader.close(); IndexReader r = IndexReader.open(dir1, false); - assertEquals(encoded, r.norms("field1")[1]); + assertEquals(encoded, MultiNorms.norms(r, "field1")[1]); r.close(); dir1.close(); } Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 1037295) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy) @@ -268,7 +268,7 @@ private void verifyIndex(IndexReader ir) throws IOException { for 
(int i = 0; i < NUM_FIELDS; i++) { String field = "f" + i; - byte b[] = ir.norms(field); + byte b[] = MultiNorms.norms(ir, field); assertEquals("number of norms mismatches", numDocNorms, b.length); ArrayList storedNorms = (i == 1 ? modifiedNorms : norms); for (int j = 0; j < b.length; j++) { Index: lucene/src/test/org/apache/lucene/index/TestNorms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestNorms.java (revision 1037295) +++ lucene/src/test/org/apache/lucene/index/TestNorms.java (working copy) @@ -173,7 +173,7 @@ IndexReader ir = IndexReader.open(dir, false); for (int i = 0; i < NUM_FIELDS; i++) { String field = "f"+i; - byte b[] = ir.norms(field); + byte b[] = MultiNorms.norms(ir, field); assertEquals("number of norms mismatches",numDocNorms,b.length); ArrayList storedNorms = (i==1 ? modifiedNorms : norms); for (int j = 0; j < b.length; j++) { Index: lucene/src/test/org/apache/lucene/index/TestSegmentReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1037295) +++ lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy) @@ -181,11 +181,11 @@ assertEquals(reader.hasNorms(f.name()), !DocHelper.noNorms.containsKey(f.name())); if (!reader.hasNorms(f.name())) { // test for fake norms of 1.0 or null depending on the flag - byte [] norms = reader.norms(f.name()); + byte [] norms = MultiNorms.norms(reader, f.name()); byte norm1 = Similarity.getDefault().encodeNormValue(1.0f); assertNull(norms); norms = new byte[reader.maxDoc()]; - reader.norms(f.name(),norms, 0); + MultiNorms.norms(reader, f.name(),norms, 0); for (int j=0; j