Index: CHANGES.txt =================================================================== --- CHANGES.txt (revision 735652) +++ CHANGES.txt (working copy) @@ -46,6 +46,11 @@ newly added NoSuchDirectoryException if the directory does not exist. (Marcel Reutegger, Mike McCandless) +7. LUCENE-1483: Added new MultiReaderHitCollector which enables faster + hit collection by notifying the collector for each sub-reader + that's visited. All core collectors now use this API. (Mark + Miller, Mike McCandless) + Bug fixes 1. LUCENE-1415: MultiPhraseQuery has incorrect hashCode() and equals() @@ -145,6 +150,13 @@ 4. LUCENE-1224: Short circuit FuzzyQuery.rewrite when input token length is small compared to minSimilarity. (Timo Nentwig, Mark Miller) + 5. LUCENE-1483: When searching over multiple segments we now visit + each sub-reader one at a time. This speeds up warming, since + FieldCache entries (if required) can be shared across reopens for + those segments that did not change, and also speeds up searches + that sort by relevance or by field values. (Mark Miller, Mike + McCandless) + Documentation Build Index: src/test/org/apache/lucene/search/TestSort.java =================================================================== --- src/test/org/apache/lucene/search/TestSort.java (revision 735652) +++ src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -18,27 +18,30 @@ */ import junit.framework.Test; -import junit.framework.TestCase; import junit.framework.TestSuite; import junit.textui.TestRunner; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.queryParser.ParseException; +import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.DocIdBitSet; +import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; import java.io.Serializable; import java.rmi.Naming; import java.rmi.registry.LocateRegistry; -import java.rmi.registry.Registry; import java.util.BitSet; import java.util.HashMap; import java.util.Iterator; import java.util.Locale; +import java.util.Random; import java.util.regex.Pattern; /** @@ -51,472 +54,640 @@ */ public class TestSort -extends TestCase +extends LuceneTestCase implements Serializable { - private Searcher full; - private Searcher searchX; - private Searcher searchY; - private Query queryX; - private Query queryY; - private Query queryA; - private Query queryE; - private Query queryF; - private Query queryG; - private Sort sort; + private static final int NUM_STRINGS = 6000; + private Searcher full; + private Searcher searchX; + private Searcher searchY; + private Query queryX; + private Query queryY; + private Query queryA; + private Query queryE; + private Query queryF; + private Query queryG; + private Sort sort; - public TestSort (String name) { - super (name); - } + public TestSort (String name) { + super (name); + } - public static void main (String[] argv) { - if (argv == null || argv.length < 1) - TestRunner.run (suite()); - else if ("server".equals (argv[0])) { - TestSort test = new TestSort (null); - try { - test.startServer(); - Thread.sleep (500000); - } catch (Exception e) { - System.out.println (e); - e.printStackTrace(); - } - } - } + public static void main (String[] argv) { + if 
(argv == null || argv.length < 1) + TestRunner.run (suite()); + else if ("server".equals (argv[0])) { + TestSort test = new TestSort (null); + try { + test.startServer(); + Thread.sleep (500000); + } catch (Exception e) { + System.out.println (e); + e.printStackTrace(); + } + } + } - public static Test suite() { - return new TestSuite (TestSort.class); - } + public static Test suite() { + return new TestSuite (TestSort.class); + } - // document data: - // the tracer field is used to determine which document was hit - // the contents field is used to search and sort by relevance - // the int field to sort by int - // the float field to sort by float - // the string field to sort by string + // document data: + // the tracer field is used to determine which document was hit + // the contents field is used to search and sort by relevance + // the int field to sort by int + // the float field to sort by float + // the string field to sort by string // the i18n field includes accented characters for testing locale-specific sorting - private String[][] data = new String[][] { - // tracer contents int float string custom i18n long double, 'short', byte, 'custom parser encoding' - { "A", "x a", "5", "4f", "c", "A-3", "p\u00EAche", "10", "-4.0", "3", "126", "J"},//A, x - { "B", "y a", "5", "3.4028235E38", "i", "B-10", "HAT", "1000000000", "40.0", "24", "1", "I"},//B, y - { "C", "x a b c", "2147483647", "1.0", "j", "A-2", "p\u00E9ch\u00E9", "99999999", "40.00002343", "125", "15", "H"},//C, x - { "D", "y a b c", "-1", "0.0f", "a", "C-0", "HUT", String.valueOf(Long.MAX_VALUE), String.valueOf(Double.MIN_VALUE), String.valueOf(Short.MIN_VALUE), String.valueOf(Byte.MIN_VALUE), "G"},//D, y - { "E", "x a b c d", "5", "2f", "h", "B-8", "peach", String.valueOf(Long.MIN_VALUE), String.valueOf(Double.MAX_VALUE), String.valueOf(Short.MAX_VALUE), String.valueOf(Byte.MAX_VALUE), "F"},//E,x - { "F", "y a b c d", "2", "3.14159f", "g", "B-1", "H\u00C5T", "-44", "343.034435444", "-3", "0", "E"},//F,y - { "G", "x a b c d", "3", "-1.0", "f", "C-100", "sin", "323254543543", "4.043544", "5", "100", "D"},//G,x + private String[][] data = new String[][] { + // tracer contents int float string custom i18n long double, 'short', byte, 'custom parser encoding' + { "A", "x a", "5", "4f", "c", "A-3", "p\u00EAche", "10", "-4.0", "3", "126", "J"},//A, x + { "B", "y a", "5", "3.4028235E38", "i", "B-10", "HAT", "1000000000", "40.0", "24", "1", "I"},//B, y + { "C", "x a b c", "2147483647", "1.0", "j", "A-2", "p\u00E9ch\u00E9", "99999999", "40.00002343", "125", "15", "H"},//C, x + { "D", "y a b c", "-1", "0.0f", "a", "C-0", "HUT", String.valueOf(Long.MAX_VALUE), String.valueOf(Double.MIN_VALUE), String.valueOf(Short.MIN_VALUE), String.valueOf(Byte.MIN_VALUE), "G"},//D, y + { "E", "x a b c d", "5", "2f", "h", "B-8", "peach", String.valueOf(Long.MIN_VALUE), String.valueOf(Double.MAX_VALUE), String.valueOf(Short.MAX_VALUE), String.valueOf(Byte.MAX_VALUE), "F"},//E,x + { "F", "y a b c d", "2", "3.14159f", "g", "B-1", "H\u00C5T", "-44", "343.034435444", "-3", "0", "E"},//F,y + { "G", "x a b c d", "3", "-1.0", "f", "C-100", "sin", "323254543543", "4.043544", "5", "100", "D"},//G,x { "H", "y a b c d", "0", "1.4E-45", "e", "C-88", "H\u00D8T", "1023423423005","4.043545", "10", "-50", "C"},//H,y - { "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10", "s\u00EDn", "332422459999", "4.043546", "-340", "51", "B"},//I,x - { "J", "y a b c d e f", "4", ".5", "b", "C-7", "HOT", "34334543543", "4.0000220343", "300", "2", "A"},//J,y - { "W", "g", 
"1", null, null, null, null, null, null, null, null, null}, - { "X", "g", "1", "0.1", null, null, null, null, null, null, null, null}, - { "Y", "g", "1", "0.2", null, null, null, null, null, null, null, null}, - { "Z", "f g", null, null, null, null, null, null, null, null, null, null} - }; + { "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10", "s\u00EDn", "332422459999", "4.043546", "-340", "51", "B"},//I,x + { "J", "y a b c d e f", "4", ".5", "b", "C-7", "HOT", "34334543543", "4.0000220343", "300", "2", "A"},//J,y + { "W", "g", "1", null, null, null, null, null, null, null, null, null}, + { "X", "g", "1", "0.1", null, null, null, null, null, null, null, null}, + { "Y", "g", "1", "0.2", null, null, null, null, null, null, null, null}, + { "Z", "f g", null, null, null, null, null, null, null, null, null, null} + }; - // create an index of all the documents, or just the x, or just the y documents - private Searcher getIndex (boolean even, boolean odd) - throws IOException { - RAMDirectory indexStore = new RAMDirectory (); - IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); - for (int i=0; i= 0)) { // ensure first field is in order + fail = true; + System.out.println("fail:" + v[j] + " < " + last); + } + if (cmp == 0) { // ensure second field is in reverse order + cmp = v2[j].compareTo(lastSub); + if (cmp > 0) { + fail = true; + System.out.println("rev field fail:" + v2[j] + " > " + lastSub); + } else if(cmp == 0) { // ensure docid is in order + if (result[x].doc < lastDocId) { + fail = true; + System.out.println("doc fail:" + result[x].doc + " > " + lastDocId); + } + } + } + } + last = v[j]; + lastSub = v2[j]; + lastDocId = result[x].doc; + buff.append(v[j] + "(" + v2[j] + ")(" + result[x].doc+") "); + } + } + if(fail) { + System.out.println("topn field1(field2)(docID):" + buff); + } + assertFalse("Found sort results out of order", fail); - sort.setSort (new SortField[] { new SortField ("parser", new ExtendedFieldCache.LongParser(){ - public final long parseLong(final String val) { - return (long) (val.charAt(0)-'A') * 1234567890L; - } - }), SortField.FIELD_DOC }); - assertMatches (full, queryA, sort, "JIHGFEDCBA"); + } + + // test sorts where the type of field is specified and a custom field parser is used, that + // uses a simple char encoding. The sorted string contains a character beginning from 'A' that + // is mapped to a numeric value using some "funny" algorithm to be different for each data type. 
+ public void testCustomFieldParserSort() throws Exception { + sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.IntParser(){ + public final int parseInt(final String val) { + return (int) (val.charAt(0)-'A') * 123456; + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); - sort.setSort (new SortField[] { new SortField ("parser", new ExtendedFieldCache.DoubleParser(){ - public final double parseDouble(final String val) { - return Math.pow( (double) val.charAt(0), (double) (val.charAt(0)-'A') ); - } - }), SortField.FIELD_DOC }); - assertMatches (full, queryA, sort, "JIHGFEDCBA"); + sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.FloatParser(){ + public final float parseFloat(final String val) { + return (float) Math.sqrt( (double) val.charAt(0) ); + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); - sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.ByteParser(){ - public final byte parseByte(final String val) { - return (byte) (val.charAt(0)-'A'); - } - }), SortField.FIELD_DOC }); - assertMatches (full, queryA, sort, "JIHGFEDCBA"); + sort.setSort (new SortField[] { new SortField ("parser", new ExtendedFieldCache.LongParser(){ + public final long parseLong(final String val) { + return (long) (val.charAt(0)-'A') * 1234567890L; + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); - sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.ShortParser(){ - public final short parseShort(final String val) { - return (short) (val.charAt(0)-'A'); - } - }), SortField.FIELD_DOC }); - assertMatches (full, queryA, sort, "JIHGFEDCBA"); - } + sort.setSort (new SortField[] { new SortField ("parser", new ExtendedFieldCache.DoubleParser(){ + public final double parseDouble(final String val) { + return Math.pow( (double) val.charAt(0), (double) (val.charAt(0)-'A') ); + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); - // test sorts when there's nothing in the index - public void testEmptyIndex() throws Exception { - Searcher empty = getEmptyIndex(); + sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.ByteParser(){ + public final byte parseByte(final String val) { + return (byte) (val.charAt(0)-'A'); + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); - sort = new Sort(); - assertMatches (empty, queryX, sort, ""); + sort.setSort (new SortField[] { new SortField ("parser", new FieldCache.ShortParser(){ + public final short parseShort(final String val) { + return (short) (val.charAt(0)-'A'); + } + }), SortField.FIELD_DOC }); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); + } - sort.setSort(SortField.FIELD_DOC); - assertMatches (empty, queryX, sort, ""); + // test sorts when there's nothing in the index + public void testEmptyIndex() throws Exception { + Searcher empty = getEmptyIndex(); - sort.setSort (new SortField[] { new SortField ("int", SortField.INT), SortField.FIELD_DOC }); - assertMatches (empty, queryX, sort, ""); + sort = new Sort(); + assertMatches (empty, queryX, sort, ""); - sort.setSort (new SortField[] { new SortField ("string", SortField.STRING, true), SortField.FIELD_DOC }); - assertMatches (empty, queryX, sort, ""); + sort.setSort(SortField.FIELD_DOC); + assertMatches (empty, queryX, sort, ""); - sort.setSort (new SortField[] { new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING) }); - 
assertMatches (empty, queryX, sort, ""); - } + sort.setSort (new SortField[] { new SortField ("int", SortField.INT), SortField.FIELD_DOC }); + assertMatches (empty, queryX, sort, ""); - // test sorts where the type of field is determined dynamically - public void testAutoSort() throws Exception { - sort.setSort("int"); - assertMatches (full, queryX, sort, "IGAEC"); - assertMatches (full, queryY, sort, "DHFJB"); + sort.setSort (new SortField[] { new SortField ("string", SortField.STRING, true), SortField.FIELD_DOC }); + assertMatches (empty, queryX, sort, ""); - sort.setSort("float"); - assertMatches (full, queryX, sort, "GCIEA"); - assertMatches (full, queryY, sort, "DHJFB"); + sort.setSort (new SortField[] { new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING) }); + assertMatches (empty, queryX, sort, ""); + } - sort.setSort("string"); - assertMatches (full, queryX, sort, "AIGEC"); - assertMatches (full, queryY, sort, "DJHFB"); - } + static class MyFieldComparator extends FieldComparator { + int[] docValues; + int[] slotValues; + int bottomValue; - // test sorts in reverse - public void testReverseSort() throws Exception { - sort.setSort (new SortField[] { new SortField (null, SortField.SCORE, true), SortField.FIELD_DOC }); - assertMatches (full, queryX, sort, "IEGCA"); - assertMatches (full, queryY, sort, "JFHDB"); + MyFieldComparator(int numHits) { + slotValues = new int[numHits]; + } - sort.setSort (new SortField (null, SortField.DOC, true)); - assertMatches (full, queryX, sort, "IGECA"); - assertMatches (full, queryY, sort, "JHFDB"); + public void copy(int slot, int doc, float score) { + slotValues[slot] = docValues[doc]; + } - sort.setSort ("int", true); - assertMatches (full, queryX, sort, "CAEGI"); - assertMatches (full, queryY, sort, "BJFHD"); + public int compare(int slot1, int slot2) { + return slotValues[slot1] - slotValues[slot2]; + } - sort.setSort ("float", true); - assertMatches (full, queryX, sort, "AECIG"); - assertMatches (full, queryY, sort, "BFJHD"); + public int compareBottom(int doc, float score) { + return bottomValue - docValues[doc]; + } - sort.setSort ("string", true); - assertMatches (full, queryX, sort, "CEGIA"); - assertMatches (full, queryY, sort, "BFHJD"); - } + public void setBottom(int bottom) { + bottomValue = slotValues[bottom]; + } - // test sorting when the sort field is empty (undefined) for some of the documents - public void testEmptyFieldSort() throws Exception { - sort.setSort ("string"); - assertMatches (full, queryF, sort, "ZJI"); + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + docValues = FieldCache.DEFAULT.getInts(reader, "parser", new FieldCache.IntParser() { + public final int parseInt(final String val) { + return (int) (val.charAt(0)-'A') * 123456; + } + }); + } - sort.setSort ("string", true); - assertMatches (full, queryF, sort, "IJZ"); - - sort.setSort (new SortField ("i18n", Locale.ENGLISH)); - assertMatches (full, queryF, sort, "ZJI"); - - sort.setSort (new SortField ("i18n", Locale.ENGLISH, true)); - assertMatches (full, queryF, sort, "IJZ"); + public int sortType() { + return 0; + } - sort.setSort ("int"); - assertMatches (full, queryF, sort, "IZJ"); + public Comparable value(int slot) { + return new Integer(slotValues[slot]); + } + } - sort.setSort ("int", true); - assertMatches (full, queryF, sort, "JZI"); + static class MyFieldComparatorSource extends FieldComparatorSource { + FieldComparator newComparator(String fieldname, IndexReader[] 
subReaders, int numHits, int sortPos, boolean reversed) { + return new MyFieldComparator(numHits); + } + } - sort.setSort ("float"); - assertMatches (full, queryF, sort, "ZJI"); + // Test sorting w/ custom FieldComparator + public void testNewCustomFieldParserSort() throws Exception { + sort.setSort (new SortField[] { new SortField ("parser", new MyFieldComparatorSource())}); + assertMatches (full, queryA, sort, "JIHGFEDCBA"); + } - // using a nonexisting field as first sort key shouldn't make a difference: - sort.setSort (new SortField[] { new SortField ("nosuchfield", SortField.STRING), - new SortField ("float") }); - assertMatches (full, queryF, sort, "ZJI"); + // test sorts where the type of field is determined dynamically + public void testAutoSort() throws Exception { + sort.setSort("int"); + assertMatches (full, queryX, sort, "IGAEC"); + assertMatches (full, queryY, sort, "DHFJB"); - sort.setSort ("float", true); - assertMatches (full, queryF, sort, "IJZ"); + sort.setSort("float"); + assertMatches (full, queryX, sort, "GCIEA"); + assertMatches (full, queryY, sort, "DHJFB"); - // When a field is null for both documents, the next SortField should be used. + sort.setSort("string"); + assertMatches (full, queryX, sort, "AIGEC"); + assertMatches (full, queryY, sort, "DJHFB"); + } + + // test sorts in reverse + public void testReverseSort() throws Exception { + sort.setSort (new SortField[] { new SortField (null, SortField.SCORE, true), SortField.FIELD_DOC }); + assertMatches (full, queryX, sort, "IEGCA"); + assertMatches (full, queryY, sort, "JFHDB"); + + sort.setSort (new SortField (null, SortField.DOC, true)); + assertMatches (full, queryX, sort, "IGECA"); + assertMatches (full, queryY, sort, "JHFDB"); + + sort.setSort ("int", true); + assertMatches (full, queryX, sort, "CAEGI"); + assertMatches (full, queryY, sort, "BJFHD"); + + sort.setSort ("float", true); + assertMatches (full, queryX, sort, "AECIG"); + assertMatches (full, queryY, sort, "BFJHD"); + + sort.setSort ("string", true); + assertMatches (full, queryX, sort, "CEGIA"); + assertMatches (full, queryY, sort, "BFHJD"); + } + + // test sorting when the sort field is empty (undefined) for some of the documents + public void testEmptyFieldSort() throws Exception { + sort.setSort ("string"); + assertMatches (full, queryF, sort, "ZJI"); + + sort.setSort ("string", true); + assertMatches (full, queryF, sort, "IJZ"); + + sort.setSort (new SortField ("i18n", Locale.ENGLISH)); + assertMatches (full, queryF, sort, "ZJI"); + + sort.setSort (new SortField ("i18n", Locale.ENGLISH, true)); + assertMatches (full, queryF, sort, "IJZ"); + + sort.setSort ("int"); + assertMatches (full, queryF, sort, "IZJ"); + + sort.setSort ("int", true); + assertMatches (full, queryF, sort, "JZI"); + + sort.setSort ("float"); + assertMatches (full, queryF, sort, "ZJI"); + + // using a nonexisting field as first sort key shouldn't make a difference: + sort.setSort (new SortField[] { new SortField ("nosuchfield", SortField.STRING), + new SortField ("float") }); + assertMatches (full, queryF, sort, "ZJI"); + + sort.setSort ("float", true); + assertMatches (full, queryF, sort, "IJZ"); + + // When a field is null for both documents, the next SortField should be used. 
// Works for - sort.setSort (new SortField[] { new SortField ("int"), + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float") }); - assertMatches (full, queryG, sort, "ZWXY"); + new SortField ("float") }); + assertMatches (full, queryG, sort, "ZWXY"); - // Reverse the last criterium to make sure the test didn't pass by chance - sort.setSort (new SortField[] { new SortField ("int"), + // Reverse the last criterium to make sure the test didn't pass by chance + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float", true) }); - assertMatches (full, queryG, sort, "ZYXW"); + new SortField ("float", true) }); + assertMatches (full, queryG, sort, "ZYXW"); - // Do the same for a MultiSearcher - Searcher multiSearcher=new MultiSearcher (new Searchable[] { full }); + // Do the same for a MultiSearcher + Searcher multiSearcher=new MultiSearcher (new Searchable[] { full }); - sort.setSort (new SortField[] { new SortField ("int"), + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float") }); - assertMatches (multiSearcher, queryG, sort, "ZWXY"); + new SortField ("float") }); + assertMatches (multiSearcher, queryG, sort, "ZWXY"); - sort.setSort (new SortField[] { new SortField ("int"), + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float", true) }); - assertMatches (multiSearcher, queryG, sort, "ZYXW"); - // Don't close the multiSearcher. it would close the full searcher too! + new SortField ("float", true) }); + assertMatches (multiSearcher, queryG, sort, "ZYXW"); + // Don't close the multiSearcher. it would close the full searcher too! - // Do the same for a ParallelMultiSearcher + // Do the same for a ParallelMultiSearcher Searcher parallelSearcher=new ParallelMultiSearcher (new Searchable[] { full }); - sort.setSort (new SortField[] { new SortField ("int"), + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float") }); - assertMatches (parallelSearcher, queryG, sort, "ZWXY"); + new SortField ("float") }); + assertMatches (parallelSearcher, queryG, sort, "ZWXY"); - sort.setSort (new SortField[] { new SortField ("int"), + sort.setSort (new SortField[] { new SortField ("int"), new SortField ("string", SortField.STRING), - new SortField ("float", true) }); - assertMatches (parallelSearcher, queryG, sort, "ZYXW"); - // Don't close the parallelSearcher. it would close the full searcher too! - } + new SortField ("float", true) }); + assertMatches (parallelSearcher, queryG, sort, "ZYXW"); + // Don't close the parallelSearcher. it would close the full searcher too! 
+ } - // test sorts using a series of fields - public void testSortCombos() throws Exception { - sort.setSort (new String[] {"int","float"}); - assertMatches (full, queryX, sort, "IGEAC"); + // test sorts using a series of fields + public void testSortCombos() throws Exception { + sort.setSort (new String[] {"int","float"}); + assertMatches (full, queryX, sort, "IGEAC"); - sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) }); - assertMatches (full, queryX, sort, "CEAGI"); + sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) }); + assertMatches (full, queryX, sort, "CEAGI"); - sort.setSort (new String[] {"float","string"}); - assertMatches (full, queryX, sort, "GICEA"); - } + sort.setSort (new String[] {"float","string"}); + assertMatches (full, queryX, sort, "GICEA"); + } - // test using a Locale for sorting strings - public void testLocaleSort() throws Exception { - sort.setSort (new SortField[] { new SortField ("string", Locale.US) }); - assertMatches (full, queryX, sort, "AIGEC"); - assertMatches (full, queryY, sort, "DJHFB"); + // test using a Locale for sorting strings + public void testLocaleSort() throws Exception { + sort.setSort (new SortField[] { new SortField ("string", Locale.US) }); + assertMatches (full, queryX, sort, "AIGEC"); + assertMatches (full, queryY, sort, "DJHFB"); - sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) }); - assertMatches (full, queryX, sort, "CEGIA"); - assertMatches (full, queryY, sort, "BFHJD"); - } + sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) }); + assertMatches (full, queryX, sort, "CEGIA"); + assertMatches (full, queryY, sort, "BFHJD"); + } - // test using various international locales with accented characters - // (which sort differently depending on locale) - public void testInternationalSort() throws Exception { - sort.setSort (new SortField ("i18n", Locale.US)); - assertMatches (full, queryY, sort, "BFJDH"); + // test using various international locales with accented characters + // (which sort differently depending on locale) + public void testInternationalSort() throws Exception { + sort.setSort (new SortField ("i18n", Locale.US)); + assertMatches (full, queryY, sort, "BFJDH"); - sort.setSort (new SortField ("i18n", new Locale("sv", "se"))); - assertMatches (full, queryY, sort, "BJDFH"); + sort.setSort (new SortField ("i18n", new Locale("sv", "se"))); + assertMatches (full, queryY, sort, "BJDFH"); - sort.setSort (new SortField ("i18n", new Locale("da", "dk"))); - assertMatches (full, queryY, sort, "BJDHF"); + sort.setSort (new SortField ("i18n", new Locale("da", "dk"))); + assertMatches (full, queryY, sort, "BJDHF"); - sort.setSort (new SortField ("i18n", Locale.US)); - assertMatches (full, queryX, sort, "ECAGI"); + sort.setSort (new SortField ("i18n", Locale.US)); + assertMatches (full, queryX, sort, "ECAGI"); - sort.setSort (new SortField ("i18n", Locale.FRANCE)); - assertMatches (full, queryX, sort, "EACGI"); - } + sort.setSort (new SortField ("i18n", Locale.FRANCE)); + assertMatches (full, queryX, sort, "EACGI"); + } // Test the MultiSearcher's ability to preserve locale-sensitive ordering // by wrapping it around a single searcher - public void testInternationalMultiSearcherSort() throws Exception { - Searcher multiSearcher = new MultiSearcher (new Searchable[] { full }); - - sort.setSort (new SortField ("i18n", new Locale("sv", "se"))); - assertMatches (multiSearcher, queryY, 
sort, "BJDFH"); - - sort.setSort (new SortField ("i18n", Locale.US)); - assertMatches (multiSearcher, queryY, sort, "BFJDH"); - - sort.setSort (new SortField ("i18n", new Locale("da", "dk"))); - assertMatches (multiSearcher, queryY, sort, "BJDHF"); - } + public void testInternationalMultiSearcherSort() throws Exception { + Searcher multiSearcher = new MultiSearcher (new Searchable[] { full }); - // test a custom sort function - public void testCustomSorts() throws Exception { - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource())); - assertMatches (full, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true)); - assertMatches (full, queryY, sort, "HJDBF"); - SortComparator custom = SampleComparable.getComparator(); - sort.setSort (new SortField ("custom", custom)); - assertMatches (full, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", custom, true)); - assertMatches (full, queryY, sort, "HJDBF"); - } + sort.setSort (new SortField ("i18n", new Locale("sv", "se"))); + assertMatches (multiSearcher, queryY, sort, "BJDFH"); + + sort.setSort (new SortField ("i18n", Locale.US)); + assertMatches (multiSearcher, queryY, sort, "BFJDH"); + + sort.setSort (new SortField ("i18n", new Locale("da", "dk"))); + assertMatches (multiSearcher, queryY, sort, "BJDHF"); + } + + // test a custom sort function + public void testCustomSorts() throws Exception { + sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource())); + assertMatches (full, queryX, sort, "CAIEG"); + sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true)); + assertMatches (full, queryY, sort, "HJDBF"); + SortComparator custom = SampleComparable.getComparator(); + sort.setSort (new SortField ("custom", custom)); + assertMatches (full, queryX, sort, "CAIEG"); + sort.setSort (new SortField ("custom", custom, true)); + assertMatches (full, queryY, sort, "HJDBF"); + } - // test a variety of sorts using more than one searcher - public void testMultiSort() throws Exception { - MultiSearcher searcher = new MultiSearcher (new Searchable[] { searchX, searchY }); - runMultiSorts (searcher); - } + // test a variety of sorts using more than one searcher + public void testMultiSort() throws Exception { + MultiSearcher searcher = new MultiSearcher (new Searchable[] { searchX, searchY }); + runMultiSorts (searcher); + } - // test a variety of sorts using a parallel multisearcher - public void testParallelMultiSort() throws Exception { - Searcher searcher = new ParallelMultiSearcher (new Searchable[] { searchX, searchY }); - runMultiSorts (searcher); - } + // test a variety of sorts using a parallel multisearcher + public void testParallelMultiSort() throws Exception { + Searcher searcher = new ParallelMultiSearcher (new Searchable[] { searchX, searchY }); + runMultiSorts (searcher); + } - // test a variety of sorts using a remote searcher - public void testRemoteSort() throws Exception { - Searchable searcher = getRemote(); - MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher }); - runMultiSorts (multi); - } + // test a variety of sorts using a remote searcher + public void testRemoteSort() throws Exception { + Searchable searcher = getRemote(); + MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher }); + runMultiSorts (multi); + } - // test custom search when remote - public void testRemoteCustomSort() throws Exception { - Searchable searcher = getRemote(); - MultiSearcher multi 
= new MultiSearcher (new Searchable[] { searcher }); - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource())); - assertMatches (multi, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true)); - assertMatches (multi, queryY, sort, "HJDBF"); - SortComparator custom = SampleComparable.getComparator(); - sort.setSort (new SortField ("custom", custom)); - assertMatches (multi, queryX, sort, "CAIEG"); - sort.setSort (new SortField ("custom", custom, true)); - assertMatches (multi, queryY, sort, "HJDBF"); - } + // test custom search when remote + public void testRemoteCustomSort() throws Exception { + Searchable searcher = getRemote(); + MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher }); + sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource())); + assertMatches (multi, queryX, sort, "CAIEG"); + sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true)); + assertMatches (multi, queryY, sort, "HJDBF"); + SortComparator custom = SampleComparable.getComparator(); + sort.setSort (new SortField ("custom", custom)); + assertMatches (multi, queryX, sort, "CAIEG"); + sort.setSort (new SortField ("custom", custom, true)); + assertMatches (multi, queryY, sort, "HJDBF"); + } - // test that the relevancy scores are the same even if - // hits are sorted - public void testNormalizedScores() throws Exception { + // test that the relevancy scores are the same even if + // hits are sorted + public void testNormalizedScores() throws Exception { - // capture relevancy scores + // capture relevancy scores HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full); HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full); HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full); - // we'll test searching locally, remote and multi - MultiSearcher remote = new MultiSearcher (new Searchable[] { getRemote() }); - MultiSearcher multi = new MultiSearcher (new Searchable[] { searchX, searchY }); + // we'll test searching locally, remote and multi + MultiSearcher remote = new MultiSearcher (new Searchable[] { getRemote() }); + MultiSearcher multi = new MultiSearcher (new Searchable[] { searchX, searchY }); - // change sorting and make sure relevancy stays the same + // change sorting and make sure relevancy stays the same - sort = new Sort(); + sort = new Sort(); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -527,7 +698,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort(SortField.FIELD_DOC); + sort.setSort(SortField.FIELD_DOC); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -538,7 +709,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 
1000, sort).scoreDocs, multi)); - sort.setSort ("int"); + sort.setSort ("int"); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -549,7 +720,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort ("float"); + sort.setSort ("float"); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -560,7 +731,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort ("string"); + sort.setSort ("string"); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -571,7 +742,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort (new String[] {"int","float"}); + sort.setSort (new String[] {"int","float"}); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -582,7 +753,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) }); + sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) }); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -593,7 +764,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - sort.setSort (new String[] {"float","string"}); + sort.setSort (new String[] {"float","string"}); assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full)); assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote)); assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi)); @@ -604,7 +775,7 @@ assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, 
sort).scoreDocs, remote)); assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi)); - } + } public void testTopDocsScores() throws Exception { @@ -621,6 +792,7 @@ Filter filt = new Filter() { public DocIdSet getDocIdSet(IndexReader reader) throws IOException { BitSet bs = new BitSet(reader.maxDoc()); + bs.set(0, reader.maxDoc()); bs.set(docs1.scoreDocs[0].doc); return new DocIdBitSet(bs); } @@ -631,121 +803,123 @@ assertEquals(docs1.scoreDocs[0].score, docs2.scoreDocs[0].score, 1e-6); } - // runs a variety of sorts useful for multisearchers - private void runMultiSorts (Searcher multi) throws Exception { - sort.setSort (SortField.FIELD_DOC); - assertMatchesPattern (multi, queryA, sort, "[AB]{2}[CD]{2}[EF]{2}[GH]{2}[IJ]{2}"); + private void runMultiSorts (Searcher multi) throws Exception { + sort.setSort (SortField.FIELD_DOC); + assertMatchesPattern (multi, queryA, sort, "[AB]{2}[CD]{2}[EF]{2}[GH]{2}[IJ]{2}"); - sort.setSort (new SortField ("int", SortField.INT)); - assertMatchesPattern (multi, queryA, sort, "IDHFGJ[ABE]{3}C"); + sort.setSort (new SortField ("int", SortField.INT)); + assertMatchesPattern (multi, queryA, sort, "IDHFGJ[ABE]{3}C"); - sort.setSort (new SortField[] {new SortField ("int", SortField.INT), SortField.FIELD_DOC}); - assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC"); + sort.setSort (new SortField[] {new SortField ("int", SortField.INT), SortField.FIELD_DOC}); + assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC"); - sort.setSort ("int"); - assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC"); + sort.setSort ("int"); + assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC"); - sort.setSort (new SortField[] {new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC}); - assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB"); + sort.setSort (new SortField[] {new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC}); + assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB"); - sort.setSort ("float"); - assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB"); + sort.setSort ("float"); + assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB"); - sort.setSort ("string"); - assertMatches (multi, queryA, sort, "DJAIHGFEBC"); + sort.setSort ("string"); + assertMatches (multi, queryA, sort, "DJAIHGFEBC"); - sort.setSort ("int", true); - assertMatchesPattern (multi, queryA, sort, "C[AB]{2}EJGFHDI"); + sort.setSort ("int", true); + assertMatchesPattern (multi, queryA, sort, "C[AB]{2}EJGFHDI"); - sort.setSort ("float", true); - assertMatchesPattern (multi, queryA, sort, "BAFE[IC]{2}JHDG"); + sort.setSort ("float", true); + assertMatchesPattern (multi, queryA, sort, "BAFE[IC]{2}JHDG"); - sort.setSort ("string", true); - assertMatches (multi, queryA, sort, "CBEFGHIAJD"); + sort.setSort ("string", true); + assertMatches (multi, queryA, sort, "CBEFGHIAJD"); - sort.setSort (new SortField[] { new SortField ("string", Locale.US) }); - assertMatches (multi, queryA, sort, "DJAIHGFEBC"); + sort.setSort (new SortField[] { new SortField ("string", Locale.US) }); + assertMatches (multi, queryA, sort, "DJAIHGFEBC"); - sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) }); - assertMatches (multi, queryA, sort, "CBEFGHIAJD"); + sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) }); + assertMatches (multi, queryA, sort, "CBEFGHIAJD"); - sort.setSort (new String[] {"int","float"}); - assertMatches (multi, queryA, sort, "IDHFGJEABC"); + 
sort.setSort (new String[] {"int","float"}); + assertMatches (multi, queryA, sort, "IDHFGJEABC"); - sort.setSort (new String[] {"float","string"}); - assertMatches (multi, queryA, sort, "GDHJICEFAB"); + sort.setSort (new String[] {"float","string"}); + assertMatches (multi, queryA, sort, "GDHJICEFAB"); - sort.setSort ("int"); - assertMatches (multi, queryF, sort, "IZJ"); + sort.setSort ("int"); + assertMatches (multi, queryF, sort, "IZJ"); - sort.setSort ("int", true); - assertMatches (multi, queryF, sort, "JZI"); + sort.setSort ("int", true); + assertMatches (multi, queryF, sort, "JZI"); - sort.setSort ("float"); - assertMatches (multi, queryF, sort, "ZJI"); + sort.setSort ("float"); + assertMatches (multi, queryF, sort, "ZJI"); - sort.setSort ("string"); - assertMatches (multi, queryF, sort, "ZJI"); + sort.setSort ("string"); + assertMatches (multi, queryF, sort, "ZJI"); - sort.setSort ("string", true); - assertMatches (multi, queryF, sort, "IJZ"); - } + sort.setSort ("string", true); + assertMatches (multi, queryF, sort, "IJZ"); + } - // make sure the documents returned by the search match the expected list - private void assertMatches (Searcher searcher, Query query, Sort sort, String expectedResult) - throws IOException { - ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs; - StringBuffer buff = new StringBuffer(10); + // make sure the documents returned by the search match the expected list + private void assertMatches (Searcher searcher, Query query, Sort sort, String expectedResult) + throws IOException { + //ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs; + TopDocs hits = searcher.search (query, null, expectedResult.length(), sort); + ScoreDoc[] result = hits.scoreDocs; + assertEquals(hits.totalHits, expectedResult.length()); + StringBuffer buff = new StringBuffer(10); int n = result.length; - for (int i=0; i 0 ) { try { Thread.sleep(slowdown); @@ -315,8 +319,9 @@ System.out.println("caught " + x); } } - bits.set( doc ); - lastDocCollected = doc; + assert docId >= 0: " base=" + docBase + " doc=" + doc; + bits.set( docId ); + lastDocCollected = docId; } public int hitCount() { @@ -326,6 +331,10 @@ public int getLastDocCollected() { return lastDocCollected; } + + public void setNextReader(IndexReader reader, int base) { + docBase = base; + } } Index: src/test/org/apache/lucene/search/TestTermScorer.java =================================================================== --- src/test/org/apache/lucene/search/TestTermScorer.java (revision 735652) +++ src/test/org/apache/lucene/search/TestTermScorer.java (working copy) @@ -82,15 +82,19 @@ //must call next first - ts.score(new HitCollector() + ts.score(new MultiReaderHitCollector() { + private int base = -1; public void collect(int doc, float score) { - docs.add(new TestHit(doc, score)); + docs.add(new TestHit(doc + base, score)); assertTrue("score " + score + " is not greater than 0", score > 0); assertTrue("Doc: " + doc + " does not equal: " + 0 + " or doc does not equaal: " + 5, doc == 0 || doc == 5); } + public void setNextReader(IndexReader reader, int docBase) { + base = docBase; + } }); assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2); TestHit doc0 = (TestHit) docs.get(0); Index: src/test/org/apache/lucene/search/TestSetNorm.java =================================================================== --- src/test/org/apache/lucene/search/TestSetNorm.java (revision 735652) +++ src/test/org/apache/lucene/search/TestSetNorm.java (working copy) @@ -62,10 +62,14 @@ 
new IndexSearcher(store).search (new TermQuery(new Term("field", "word")), - new HitCollector() { + new MultiReaderHitCollector() { + private int base = -1; public final void collect(int doc, float score) { - scores[doc] = score; + scores[doc + base] = score; } + public void setNextReader(IndexReader reader, int docBase) { + base = docBase; + } }); float lastScore = 0.0f; Index: src/test/org/apache/lucene/search/TestScorerPerf.java =================================================================== --- src/test/org/apache/lucene/search/TestScorerPerf.java (revision 735652) +++ src/test/org/apache/lucene/search/TestScorerPerf.java (working copy) @@ -5,7 +5,6 @@ import java.util.Random; import java.util.BitSet; -import java.util.Set; import java.io.IOException; import org.apache.lucene.index.IndexReader; @@ -51,6 +50,7 @@ // This could possibly fail if Lucene starts checking for docid ranges... RAMDirectory rd = new RAMDirectory(); IndexWriter iw = new IndexWriter(rd,new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + iw.addDocument(new Document()); iw.close(); s = new IndexSearcher(rd); } @@ -96,17 +96,22 @@ return sets; } - public static class CountingHitCollector extends HitCollector { + public static class CountingHitCollector extends MultiReaderHitCollector { int count=0; int sum=0; + protected int docBase = -1; public void collect(int doc, float score) { count++; - sum += doc; // use it to avoid any possibility of being optimized away + sum += docBase+doc; // use it to avoid any possibility of being optimized away } public int getCount() { return count; } public int getSum() { return sum; } + + public void setNextReader(IndexReader reader, int base) { + docBase = base; + } } @@ -119,8 +124,8 @@ public void collect(int doc, float score) { pos = answer.nextSetBit(pos+1); - if (pos != doc) { - throw new RuntimeException("Expected doc " + pos + " but got " + doc); + if (pos != doc + docBase) { + throw new RuntimeException("Expected doc " + pos + " but got " + doc + docBase); } super.collect(doc,score); } @@ -158,6 +163,7 @@ : new CountingHitCollector(); s.search(bq, hc); ret += hc.getSum(); + if (validate) assertEquals(result.cardinality(), hc.getCount()); // System.out.println(hc.getCount()); } Index: src/test/org/apache/lucene/search/TestSimilarity.java =================================================================== --- src/test/org/apache/lucene/search/TestSimilarity.java (revision 735652) +++ src/test/org/apache/lucene/search/TestSimilarity.java (working copy) @@ -21,9 +21,9 @@ import java.util.Collection; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.document.Document; @@ -75,10 +75,11 @@ searcher.search (new TermQuery(b), - new HitCollector() { + new MultiReaderHitCollector() { public final void collect(int doc, float score) { assertTrue(score == 1.0f); } + public void setNextReader(IndexReader reader, int docBase) {} }); BooleanQuery bq = new BooleanQuery(); @@ -87,11 +88,15 @@ //System.out.println(bq.toString("field")); searcher.search (bq, - new HitCollector() { + new MultiReaderHitCollector() { + private int base = -1; public final void collect(int doc, float score) { //System.out.println("Doc=" + doc + " score=" + score); - assertTrue(score == (float)doc+1); + assertTrue(score == (float)doc+base+1); } + public 
void setNextReader(IndexReader reader, int docBase) { + base = docBase; + } }); PhraseQuery pq = new PhraseQuery(); @@ -100,22 +105,24 @@ //System.out.println(pq.toString("field")); searcher.search (pq, - new HitCollector() { + new MultiReaderHitCollector() { public final void collect(int doc, float score) { //System.out.println("Doc=" + doc + " score=" + score); assertTrue(score == 1.0f); } + public void setNextReader(IndexReader reader, int docBase) {} }); pq.setSlop(2); //System.out.println(pq.toString("field")); searcher.search (pq, - new HitCollector() { + new MultiReaderHitCollector() { public final void collect(int doc, float score) { //System.out.println("Doc=" + doc + " score=" + score); assertTrue(score == 2.0f); } + public void setNextReader(IndexReader reader, int docBase) {} }); } } Index: src/test/org/apache/lucene/search/TestStressSort.java =================================================================== --- src/test/org/apache/lucene/search/TestStressSort.java (revision 0) +++ src/test/org/apache/lucene/search/TestStressSort.java (revision 0) @@ -0,0 +1,354 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.store.MockRAMDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.analysis.standard.StandardAnalyzer; + +import java.util.Random; +import java.util.Arrays; + +public class TestStressSort extends LuceneTestCase { + + private final static int NUM_DOCS = 5000; + // NOTE: put seed in here to make failures + // deterministic, but do not commit with a seed (to + // better test): + private final Random r = new Random(); + private Directory dir, dir2, dir3; + private IndexSearcher searcherMultiSegment; + private IndexSearcher searcherFewSegment; + private IndexSearcher searcherSingleSegment; + + private static final boolean VERBOSE = false; + + // min..max + private int nextInt(int min, int max) { + return min + r.nextInt(max-min+1); + } + + // 0..(lim-1) + private int nextInt(int lim) { + return r.nextInt(lim); + } + + final char[] buffer = new char[20]; + private String randomString(int size) { + assert size < 20; + for(int i=0;iFieldCache.DEFAULT for maintaining + * internal term lookup tables. + * + * NOTE: This API is experimental and might change in + * incompatible ways in the next release. 
+ * + * @since lucene 2.9 + * @version $Id: + * @see Searcher#search(Query,Filter,int,Sort) + * @see FieldCache + */ +public class FieldValueHitQueue extends PriorityQueue { + + final static class Entry { + int slot; + int docID; + float score; + + Entry(int slot, int docID, float score) { + this.slot = slot; + this.docID = docID; + this.score = score; + } + + public String toString() { + return "slot:" + slot + " docID:" + docID; + } + } + + /** + * Creates a hit queue sorted by the given list of fields. + * @param fields SortField array we are sorting by in + * priority order (highest priority first); cannot be null or empty + * @param size The number of hits to retain. Must be + * greater than zero. + * @param subReaders Array of IndexReaders we will search, + * in order that they will be searched + * @throws IOException + */ + public FieldValueHitQueue(SortField[] fields, int size, IndexReader[] subReaders) throws IOException { + numComparators = fields.length; + comparators = new FieldComparator[numComparators]; + reverseMul = new int[numComparators]; + + if (fields.length == 0) { + throw new IllegalArgumentException("Sort must contain at least one field"); + } + + this.fields = fields; + for (int i=0; ia is less relevant than b. + * @param a ScoreDoc + * @param b ScoreDoc + * @return true if document a should be sorted after document b. + */ + protected boolean lessThan (final Object a, final Object b) { + final Entry hitA = (Entry) a; + final Entry hitB = (Entry) b; + + assert hitA != hitB; + assert hitA.slot != hitB.slot; + + if (numComparators == 1) { + // Common case + final int c = reverseMul1 * comparator1.compare(hitA.slot, hitB.slot); + if (c != 0) { + return c > 0; + } + } else { + // run comparators + for (int i=0; i 0; + } + } + } + + // avoid random sort order that could lead to duplicates (bug #31241): + return hitA.docID > hitB.docID; + } + + + /** + * Given a FieldDoc object, stores the values used + * to sort the given document. These values are not the raw + * values out of the index, but the internal representation + * of them. This is so the given search hit can be collated + * by a MultiSearcher with other search hits. + * @param doc The FieldDoc to store sort values into. + * @return The same FieldDoc passed in. + * @see Searchable#search(Weight,Filter,int,Sort) + */ + FieldDoc fillFields (final Entry entry) { + final int n = comparators.length; + final Comparable[] fields = new Comparable[n]; + for (int i=0; i 1.0f) doc.score /= maxscore; // normalize scores + return new FieldDoc(entry.docID, + entry.score, + fields); + } + + + /** Returns the SortFields being used by this hit queue. */ + SortField[] getFields() { + return fields; + } + + /** + * Attempts to detect the given field type for an IndexReader. 
+ */ + static int detectFieldType(IndexReader reader, String fieldKey) throws IOException { + String field = ((String)fieldKey).intern(); + TermEnum enumerator = reader.terms (new Term (field)); + try { + Term term = enumerator.term(); + if (term == null) { + throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type"); + } + int ret = 0; + if (term.field() == field) { + String termtext = term.text().trim(); + + /** + * Java 1.4 level code: + + if (pIntegers.matcher(termtext).matches()) + return IntegerSortedHitQueue.comparator (reader, enumerator, field); + + else if (pFloats.matcher(termtext).matches()) + return FloatSortedHitQueue.comparator (reader, enumerator, field); + */ + + // Java 1.3 level code: + try { + Integer.parseInt (termtext); + ret = SortField.INT; + } catch (NumberFormatException nfe1) { + try { + Long.parseLong(termtext); + ret = SortField.LONG; + } catch (NumberFormatException nfe2) { + try { + Float.parseFloat (termtext); + ret = SortField.FLOAT; + } catch (NumberFormatException nfe3) { + ret = SortField.STRING; + } + } + } + } else { + throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed"); + } + return ret; + } finally { + enumerator.close(); + } + } +} Property changes on: src/java/org/apache/lucene/search/FieldValueHitQueue.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/TopScoreDocCollector.java =================================================================== --- src/java/org/apache/lucene/search/TopScoreDocCollector.java (revision 0) +++ src/java/org/apache/lucene/search/TopScoreDocCollector.java (revision 0) @@ -0,0 +1,101 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.util.PriorityQueue; + +/** A {@link MultiReaderHitCollector} implementation that + * collects the top-scoring documents, returning them as a + * {@link TopDocs}. This is used by {@link IndexSearcher} + * to implement {@link TopDocs}-based search. + * + *

This may be extended, overriding the {@link + * MultiReaderHitCollector#collect} method to, e.g., + * conditionally invoke super() in order to + * filter which documents are collected, but be sure you + * either take docBase into account, or also override + * the {@link MultiReaderHitCollector#setNextReader} method. */ +public class TopScoreDocCollector extends MultiReaderHitCollector { + + private ScoreDoc reusableSD; + + /** The total number of hits the collector encountered. */ + protected int totalHits; + + /** The priority queue which holds the top-scoring documents. */ + protected PriorityQueue hq; + + protected int docBase = 0; + + /** Construct to collect a given number of hits. + * @param numHits the maximum number of hits to collect + */ + public TopScoreDocCollector(int numHits) { + this(new HitQueue(numHits)); + } + + /** Constructor to collect the top-scoring documents by using the given PQ. + * @param hq the PQ to use by this instance. + */ + protected TopScoreDocCollector(PriorityQueue hq) { + this.hq = hq; + } + + // javadoc inherited + public void collect(int doc, float score) { + if (score > 0.0f) { + totalHits++; + if (reusableSD == null) { + reusableSD = new ScoreDoc(doc + docBase, score); + } else if (score >= reusableSD.score) { + // reusableSD holds the last "rejected" entry, so, if + // this new score is not better than that, there's no + // need to try inserting it + reusableSD.doc = doc + docBase; + reusableSD.score = score; + } else { + return; + } + reusableSD = (ScoreDoc) hq.insertWithOverflow(reusableSD); + } + } + + /** The total number of documents that matched this query. */ + public int getTotalHits() { + return totalHits; + } + + /** The top-scoring hits. */ + public TopDocs topDocs() { + ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()]; + for (int i = hq.size()-1; i >= 0; i--) { // put docs in array + scoreDocs[i] = (ScoreDoc) hq.pop(); + } + + float maxScore = (totalHits==0) + ? Float.NEGATIVE_INFINITY + : scoreDocs[0].score; + + return new TopDocs(totalHits, scoreDocs, maxScore); + } + + public void setNextReader(IndexReader reader, int base) { + docBase = base; + } +} Property changes on: src/java/org/apache/lucene/search/TopScoreDocCollector.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/TopDocCollector.java =================================================================== --- src/java/org/apache/lucene/search/TopDocCollector.java (revision 735652) +++ src/java/org/apache/lucene/search/TopDocCollector.java (working copy) @@ -26,6 +26,9 @@ *

This may be extended, overriding the collect method to, e.g., * conditionally invoke super() in order to filter which * documents are collected. + * + * @deprecated Please use {@link TopScoreDocCollector} + * instead, which has better performance. **/ public class TopDocCollector extends HitCollector { Index: src/java/org/apache/lucene/search/MultiReaderHitCollector.java =================================================================== --- src/java/org/apache/lucene/search/MultiReaderHitCollector.java (revision 0) +++ src/java/org/apache/lucene/search/MultiReaderHitCollector.java (revision 0) @@ -0,0 +1,53 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; + +import org.apache.lucene.index.IndexReader; + +/** + * Expert: A HitCollector that can be used to collect hits + * across sequential IndexReaders. For a Multi*Reader, this + * collector advances through each of the sub readers, in an + * arbitrary order. This results in a higher performance + * means of collection. + * + * NOTE: The doc that is passed to the collect method + * is relative to the current reader. You must re-base the + * doc, by recording the docBase from the last setNextReader + * call, to map it to the docID space of the + * Multi*Reader. + * + * NOTE: This API is experimental and might change in + * incompatible ways in the next release. + */ +public abstract class MultiReaderHitCollector extends HitCollector { + /** + * Called before collecting from each IndexReader. All doc + * ids in {@link #collect(int, float)} will correspond to reader. + * + * Add docBase to the current IndexReaders internal document id to + * re-base ids in {@link #collect(int, float)}. + * + * @param reader next IndexReader + * @param docBase + * @throws IOException + */ + public abstract void setNextReader(IndexReader reader, int docBase) throws IOException; +} Property changes on: src/java/org/apache/lucene/search/MultiReaderHitCollector.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/HitCollector.java =================================================================== --- src/java/org/apache/lucene/search/HitCollector.java (revision 735652) +++ src/java/org/apache/lucene/search/HitCollector.java (working copy) @@ -19,7 +19,9 @@ /** Lower-level search API. *
HitCollectors are primarily meant to be used to implement queries, - * sorting and filtering. + * sorting and filtering. See {@link + * MultiReaderHitCollector} for a lower level and + * higher performance (on a multi-segment index) API. * @see Searcher#search(Query,HitCollector) * @version $Id$ */ Index: src/java/org/apache/lucene/search/HitIterator.java =================================================================== --- src/java/org/apache/lucene/search/HitIterator.java (revision 735652) +++ src/java/org/apache/lucene/search/HitIterator.java (working copy) @@ -25,7 +25,7 @@ * {@link Hits#iterator()} returns an instance of this class. Calls to {@link #next()} * return a {@link Hit} instance. * - * @deprecated Hits will be removed in Lucene 3.0. Use {@link TopDocCollector} and {@link TopDocs} instead. + * @deprecated Hits will be removed in Lucene 3.0. Use {@link TopScoreDocCollector} and {@link TopDocs} instead. */ public class HitIterator implements Iterator { private Hits hits; Index: src/java/org/apache/lucene/search/FieldComparator.java =================================================================== --- src/java/org/apache/lucene/search/FieldComparator.java (revision 0) +++ src/java/org/apache/lucene/search/FieldComparator.java (revision 0) @@ -0,0 +1,869 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.text.Collator; +import java.util.Locale; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.ExtendedFieldCache.DoubleParser; +import org.apache.lucene.search.ExtendedFieldCache.LongParser; +import org.apache.lucene.search.FieldCache.ByteParser; +import org.apache.lucene.search.FieldCache.FloatParser; +import org.apache.lucene.search.FieldCache.IntParser; +import org.apache.lucene.search.FieldCache.ShortParser; +import org.apache.lucene.search.FieldCache.StringIndex; + +/** + * A FieldComparator compares hits across multiple IndexReaders. + * + * A comparator can compare a hit at hit 'slot a' with hit 'slot b', + * compare a hit on 'doc i' with hit 'slot a', or copy a hit at 'doc i' + * to 'slot a'. Each slot refers to a hit while each doc refers to the + * current IndexReader. + * + * NOTE: This API is experimental and might change in + * incompatible ways in the next release. 
+ */ +public abstract class FieldComparator { + + /** Parses field's values as byte (using {@link + * FieldCache#getBytes} and sorts by ascending value */ + public static final class ByteComparator extends FieldComparator { + private final byte[] values; + private byte[] currentReaderValues; + private final String field; + private ByteParser parser; + private byte bottom; + + ByteComparator(int numHits, String field, FieldCache.Parser parser) { + values = new byte[numHits]; + this.field = field; + this.parser = (ByteParser) parser; + } + + public int compare(int slot1, int slot2) { + return values[slot1] - values[slot2]; + } + + public int compareBottom(int doc, float score) { + return bottom - currentReaderValues[doc]; + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? ExtendedFieldCache.EXT_DEFAULT + .getBytes(reader, field, parser) : ExtendedFieldCache.EXT_DEFAULT + .getBytes(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.BYTE; + } + + public Comparable value(int slot) { + return new Byte(values[slot]); + } + }; + + /** Sorts by ascending docID */ + public static final class DocComparator extends FieldComparator { + private final int[] docIDs; + private int docBase; + private int bottom; + + DocComparator(int numHits) { + docIDs = new int[numHits]; + } + + public int compare(int slot1, int slot2) { + // No overflow risk because docIDs are non-negative + return docIDs[slot1] - docIDs[slot2]; + } + + public int compareBottom(int doc, float score) { + // No overflow risk because docIDs are non-negative + return bottom - (docBase + doc); + } + + public void copy(int slot, int doc, float score) { + docIDs[slot] = docBase + doc; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) { + // TODO: can we "map" our docIDs to the current + // reader? 
saves having to then subtract on every + // compare call + this.docBase = docBase; + } + + public void setBottom(final int bottom) { + this.bottom = docIDs[bottom]; + } + + public int sortType() { + return SortField.DOC; + } + + public Comparable value(int slot) { + return new Integer(docIDs[slot]); + } + }; + + /** Parses field's values as double (using {@link + * ExtendedFieldCache#getDoubles} and sorts by ascending value */ + public static final class DoubleComparator extends FieldComparator { + private final double[] values; + private double[] currentReaderValues; + private final String field; + private DoubleParser parser; + private double bottom; + + DoubleComparator(int numHits, String field, FieldCache.Parser parser) { + values = new double[numHits]; + this.field = field; + this.parser = (DoubleParser) parser; + } + + public int compare(int slot1, int slot2) { + final double v1 = values[slot1]; + final double v2 = values[slot2]; + if (v1 > v2) { + return 1; + } else if (v1 < v2) { + return -1; + } else { + return 0; + } + } + + public int compareBottom(int doc, float score) { + final double v2 = currentReaderValues[doc]; + if (bottom > v2) { + return 1; + } else if (bottom < v2) { + return -1; + } else { + return 0; + } + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? ExtendedFieldCache.EXT_DEFAULT + .getDoubles(reader, field, parser) : ExtendedFieldCache.EXT_DEFAULT + .getDoubles(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.DOUBLE; + } + + public Comparable value(int slot) { + return new Double(values[slot]); + } + }; + + /** Parses field's values as float (using {@link + * FieldCache#getFloats} and sorts by ascending value */ + public static final class FloatComparator extends FieldComparator { + private final float[] values; + private float[] currentReaderValues; + private final String field; + private FloatParser parser; + private float bottom; + + FloatComparator(int numHits, String field, FieldCache.Parser parser) { + values = new float[numHits]; + this.field = field; + this.parser = (FloatParser) parser; + } + + public int compare(int slot1, int slot2) { + // TODO: are there sneaky non-branch ways to compute + // sign of float? + final float v1 = values[slot1]; + final float v2 = values[slot2]; + if (v1 > v2) { + return 1; + } else if (v1 < v2) { + return -1; + } else { + return 0; + } + } + + public int compareBottom(int doc, float score) { + // TODO: are there sneaky non-branch ways to compute + // sign of float? + final float v2 = currentReaderValues[doc]; + if (bottom > v2) { + return 1; + } else if (bottom < v2) { + return -1; + } else { + return 0; + } + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? 
FieldCache.DEFAULT.getFloats( + reader, field, parser) : FieldCache.DEFAULT.getFloats(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.FLOAT; + } + + public Comparable value(int slot) { + return new Float(values[slot]); + } + }; + + /** Parses field's values as int (using {@link + * FieldCache#getInts} and sorts by ascending value */ + public static final class IntComparator extends FieldComparator { + private final int[] values; + private int[] currentReaderValues; + private final String field; + private IntParser parser; + private int bottom; // Value of bottom of queue + + IntComparator(int numHits, String field, FieldCache.Parser parser) { + values = new int[numHits]; + this.field = field; + this.parser = (IntParser) parser; + } + + public int compare(int slot1, int slot2) { + // TODO: there are sneaky non-branch ways to compute + // -1/+1/0 sign + // Cannot return values[slot1] - values[slot2] because that + // may overflow + final int v1 = values[slot1]; + final int v2 = values[slot2]; + if (v1 > v2) { + return 1; + } else if (v1 < v2) { + return -1; + } else { + return 0; + } + } + + public int compareBottom(int doc, float score) { + // TODO: there are sneaky non-branch ways to compute + // -1/+1/0 sign + // Cannot return bottom - values[slot2] because that + // may overflow + final int v2 = currentReaderValues[doc]; + if (bottom > v2) { + return 1; + } else if (bottom < v2) { + return -1; + } else { + return 0; + } + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? FieldCache.DEFAULT.getInts(reader, + field, parser) : FieldCache.DEFAULT.getInts(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.INT; + } + + public Comparable value(int slot) { + return new Integer(values[slot]); + } + }; + + /** Parses field's values as long (using {@link + * ExtendedFieldCache#getLongs} and sorts by ascending value */ + public static final class LongComparator extends FieldComparator { + private final long[] values; + private long[] currentReaderValues; + private final String field; + private LongParser parser; + private long bottom; + + LongComparator(int numHits, String field, FieldCache.Parser parser) { + values = new long[numHits]; + this.field = field; + this.parser = (LongParser) parser; + } + + public int compare(int slot1, int slot2) { + // TODO: there are sneaky non-branch ways to compute + // -1/+1/0 sign + final long v1 = values[slot1]; + final long v2 = values[slot2]; + if (v1 > v2) { + return 1; + } else if (v1 < v2) { + return -1; + } else { + return 0; + } + } + + public int compareBottom(int doc, float score) { + // TODO: there are sneaky non-branch ways to compute + // -1/+1/0 sign + final long v2 = currentReaderValues[doc]; + if (bottom > v2) { + return 1; + } else if (bottom < v2) { + return -1; + } else { + return 0; + } + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? 
ExtendedFieldCache.EXT_DEFAULT + .getLongs(reader, field, parser) : ExtendedFieldCache.EXT_DEFAULT + .getLongs(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.LONG; + } + + public Comparable value(int slot) { + return new Long(values[slot]); + } + }; + + /** Sorts by descending relevance. NOTE: if you are + * sorting only by descending relevance and then + * secondarily by ascending docID, peformance is faster + * using {@link TopScoreDocCollector} directly (which {@link + * IndexSearcher#search} uses when no {@link Sort} is + * specified). */ + public static final class RelevanceComparator extends FieldComparator { + private final float[] scores; + private float bottom; + + RelevanceComparator(int numHits) { + scores = new float[numHits]; + } + + public int compare(int slot1, int slot2) { + final float score1 = scores[slot1]; + final float score2 = scores[slot2]; + if (score1 > score2) { + return -1; + } else if (score1 < score2) { + return 1; + } else { + return 0; + } + } + + public int compareBottom(int doc, float score) { + if (bottom > score) { + return -1; + } else if (bottom < score) { + return 1; + } else { + return 0; + } + } + + public void copy(int slot, int doc, float score) { + scores[slot] = score; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) { + } + + public void setBottom(final int bottom) { + this.bottom = scores[bottom]; + } + + public int sortType() { + return SortField.SCORE; + } + + public Comparable value(int slot) { + return new Float(scores[slot]); + } + }; + + /** Parses field's values as short (using {@link + * FieldCache#getShorts} and sorts by ascending value */ + public static final class ShortComparator extends FieldComparator { + private final short[] values; + private short[] currentReaderValues; + private final String field; + private ShortParser parser; + private short bottom; + + ShortComparator(int numHits, String field, FieldCache.Parser parser) { + values = new short[numHits]; + this.field = field; + this.parser = (ShortParser) parser; + } + + public int compare(int slot1, int slot2) { + return values[slot1] - values[slot2]; + } + + public int compareBottom(int doc, float score) { + return bottom - currentReaderValues[doc]; + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = parser != null ? 
ExtendedFieldCache.EXT_DEFAULT + .getShorts(reader, field, parser) : ExtendedFieldCache.EXT_DEFAULT + .getShorts(reader, field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.BYTE; + } + + public Comparable value(int slot) { + return new Short(values[slot]); + } + }; + + /** Sorts by a field's value using the Collator for a + * given Locale.*/ + public static final class StringComparatorLocale extends FieldComparator { + + private final String[] values; + private String[] currentReaderValues; + private final String field; + final Collator collator; + private String bottom; + + StringComparatorLocale(int numHits, String field, Locale locale) { + values = new String[numHits]; + this.field = field; + collator = Collator.getInstance(locale); + } + + public int compare(int slot1, int slot2) { + final String val1 = values[slot1]; + final String val2 = values[slot2]; + if (val1 == null) { + if (val2 == null) { + return 0; + } + return -1; + } else if (val2 == null) { + return 1; + } + return collator.compare(val1, val2); + } + + public int compareBottom(int doc, float score) { + final String val2 = currentReaderValues[doc]; + if (bottom == null) { + if (val2 == null) { + return 0; + } + return -1; + } else if (val2 == null) { + return 1; + } + return collator.compare(bottom, val2); + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = ExtendedFieldCache.EXT_DEFAULT.getStrings(reader, + field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.STRING; + } + + public Comparable value(int slot) { + return values[slot]; + } + }; + + // NOTE: there were a number of other interesting String + // comparators explored, but this one seemed to perform + // best all around. See LUCENE-1483 for details. 
+ public static final class StringOrdValComparator extends FieldComparator { + + private final int[] ords; + private final String[] values; + private final int[] readerGen; + + private int currentReaderGen = -1; + private String[] lookup; + private int[] order; + private final String field; + + private int bottomSlot = -1; + private int bottomOrd; + private String bottomValue; + private final boolean reversed; + private final int sortPos; + + public StringOrdValComparator(int numHits, String field, int sortPos, boolean reversed) { + ords = new int[numHits]; + values = new String[numHits]; + readerGen = new int[numHits]; + this.sortPos = sortPos; + this.reversed = reversed; + this.field = field; + } + + public int compare(int slot1, int slot2) { + if (readerGen[slot1] == readerGen[slot2]) { + int cmp = ords[slot1] - ords[slot2]; + if (cmp != 0) { + return cmp; + } + } + + final String val1 = values[slot1]; + final String val2 = values[slot2]; + if (val1 == null) { + if (val2 == null) { + return 0; + } + return -1; + } else if (val2 == null) { + return 1; + } + return val1.compareTo(val2); + } + + public int compareBottom(int doc, float score) { + assert bottomSlot != -1; + int order = this.order[doc]; + final int cmp = bottomOrd - order; + if (cmp != 0) { + return cmp; + } + + final String val2 = lookup[order]; + if (bottomValue == null) { + if (val2 == null) { + return 0; + } + // bottom wins + return -1; + } else if (val2 == null) { + // doc wins + return 1; + } + return bottomValue.compareTo(val2); + } + + private void convert(int slot) { + readerGen[slot] = currentReaderGen; + int index = 0; + String value = values[slot]; + if (value == null) { + ords[slot] = 0; + return; + } + + if (sortPos == 0 && bottomSlot != -1 && bottomSlot != slot) { + // Since we are the primary sort, the entries in the + // queue are bounded by bottomOrd: + assert bottomOrd < lookup.length; + if (reversed) { + index = binarySearch(lookup, value, bottomOrd, lookup.length-1); + } else { + index = binarySearch(lookup, value, 0, bottomOrd); + } + } else { + // Full binary search + index = binarySearch(lookup, value); + } + + if (index < 0) { + index = -index - 2; + } + ords[slot] = index; + } + + public void copy(int slot, int doc, float score) { + final int ord = order[doc]; + ords[slot] = ord; + assert ord >= 0; + values[slot] = lookup[ord]; + readerGen[slot] = currentReaderGen; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + StringIndex currentReaderValues = ExtendedFieldCache.EXT_DEFAULT.getStringIndex(reader, field); + currentReaderGen++; + order = currentReaderValues.order; + lookup = currentReaderValues.lookup; + assert lookup.length > 0; + if (bottomSlot != -1) { + convert(bottomSlot); + bottomOrd = ords[bottomSlot]; + } + } + + public void setBottom(final int bottom) { + bottomSlot = bottom; + if (readerGen[bottom] != currentReaderGen) { + convert(bottomSlot); + } + bottomOrd = ords[bottom]; + assert bottomOrd >= 0; + assert bottomOrd < lookup.length; + bottomValue = values[bottom]; + } + + public int sortType() { + return SortField.STRING; + } + + public Comparable value(int slot) { + return values[slot]; + } + + public String[] getValues() { + return values; + } + + public int getBottomSlot() { + return bottomSlot; + } + + public String getField() { + return field; + } + }; + + /** Sorts by field's natural String sort order. 
All + * comparisons are done using String.compareTo, which is + * slow for medium to large result sets but possibly + * very fast for very small results sets. */ + public static final class StringValComparator extends FieldComparator { + + private String[] values; + private String[] currentReaderValues; + private final String field; + private String bottom; + + StringValComparator(int numHits, String field) { + values = new String[numHits]; + this.field = field; + } + + public int compare(int slot1, int slot2) { + final String val1 = values[slot1]; + final String val2 = values[slot2]; + if (val1 == null) { + if (val2 == null) { + return 0; + } + return -1; + } else if (val2 == null) { + return 1; + } + + return val1.compareTo(val2); + } + + public int compareBottom(int doc, float score) { + final String val2 = currentReaderValues[doc]; + if (bottom == null) { + if (val2 == null) { + return 0; + } + return -1; + } else if (val2 == null) { + return 1; + } + return bottom.compareTo(val2); + } + + public void copy(int slot, int doc, float score) { + values[slot] = currentReaderValues[doc]; + } + + public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException { + currentReaderValues = ExtendedFieldCache.EXT_DEFAULT.getStrings(reader, + field); + } + + public void setBottom(final int bottom) { + this.bottom = values[bottom]; + } + + public int sortType() { + return SortField.STRING_VAL; + } + + public Comparable value(int slot) { + return values[slot]; + } + }; + + final protected static int binarySearch(String[] a, String key) { + return binarySearch(a, key, 0, a.length-1); + }; + + final protected static int binarySearch(String[] a, String key, int low, int high) { + + while (low <= high) { + int mid = (low + high) >>> 1; + String midVal = a[mid]; + int cmp; + if (midVal != null) { + cmp = midVal.compareTo(key); + } else { + cmp = -1; + } + + if (cmp < 0) + low = mid + 1; + else if (cmp > 0) + high = mid - 1; + else + return mid; + } + return -(low + 1); + }; + + /** + * Compare hit at slot1 with hit at slot2. Return + * + * @param slot1 first slot to compare + * @param slot2 second slot to compare + * @return any N < 0 if slot2's value is sorted after + * slot1, any N > 0 if the slot2's value is sorted before + * slot1 and 0 if they are equal + */ + public abstract int compare(int slot1, int slot2); + + /** + * Set the bottom queue slot, ie the "weakest" (sorted + * last) entry in the queue. + * + * @param slot the currently weakest (sorted lost) slot in the queue + */ + public abstract void setBottom(final int slot); + + /** + * Compare the bottom of the queue with doc. This will + * only invoked after setBottom has been called. + * + * @param doc that was hit + * @param score of the hit + * @return any N < 0 if the doc's value is sorted after + * the bottom entry (not competitive), any N > 0 if the + * doc's value is sorted before the bottom entry and 0 if + * they are equal. + */ + public abstract int compareBottom(int doc, float score); + + /** + * Copy hit (doc,score) to hit slot. + * + * @param slot which slot to copy the hit to + * @param doc docID relative to current reader + * @param score hit score + */ + public abstract void copy(int slot, int doc, float score); + + /** + * Set a new Reader. All doc correspond to the current Reader. 
+ * + * @param reader current reader + * @param docBase docBase of this reader + * @throws IOException + * @throws IOException + */ + public abstract void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException; + + /** + * @return SortField.TYPE + */ + public abstract int sortType(); + + /** + * Return the actual value at slot. + * + * @param slot the value + * @return value in this slot upgraded to Comparable + */ + public abstract Comparable value(int slot); +} Property changes on: src/java/org/apache/lucene/search/FieldComparator.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/FieldSortedHitQueue.java =================================================================== --- src/java/org/apache/lucene/search/FieldSortedHitQueue.java (revision 735652) +++ src/java/org/apache/lucene/search/FieldSortedHitQueue.java (working copy) @@ -34,6 +34,7 @@ * @version $Id$ * @see Searcher#search(Query,Filter,int,Sort) * @see FieldCache + * @deprecated see {@link FieldValueHitQueue} */ public class FieldSortedHitQueue extends PriorityQueue { @@ -52,18 +53,10 @@ this.fields = new SortField[n]; for (int i=0; inull. @@ -206,22 +215,46 @@ /** Creates a sort with a custom comparison function. * @param field Name of field to sort by; cannot be null. * @param comparator Returns a comparator for sorting hits. + * @deprecated use SortField (String field, FieldComparatorSource comparator) */ public SortField (String field, SortComparatorSource comparator) { initFieldType(field, CUSTOM); + setUseLegacySearch(true); this.factory = comparator; } + + /** Creates a sort with a custom comparison function. + * @param field Name of field to sort by; cannot be null. + * @param comparator Returns a comparator for sorting hits. + */ + public SortField (String field, FieldComparatorSource comparator) { + initFieldType(field, CUSTOM); + this.comparatorSource = comparator; + } /** Creates a sort, possibly in reverse, with a custom comparison function. * @param field Name of field to sort by; cannot be null. * @param comparator Returns a comparator for sorting hits. * @param reverse True if natural order should be reversed. + * @deprecated use SortField (String field, FieldComparatorSource comparator, boolean reverse) */ public SortField (String field, SortComparatorSource comparator, boolean reverse) { initFieldType(field, CUSTOM); + setUseLegacySearch(true); this.reverse = reverse; this.factory = comparator; } + + /** Creates a sort, possibly in reverse, with a custom comparison function. + * @param field Name of field to sort by; cannot be null. + * @param comparator Returns a comparator for sorting hits. + * @param reverse True if natural order should be reversed. + */ + public SortField (String field, FieldComparatorSource comparator, boolean reverse) { + initFieldType(field, CUSTOM); + this.reverse = reverse; + this.comparatorSource = comparator; + } // Sets field & type, and ensures field is not NULL unless // type is SCORE or DOC @@ -273,26 +306,91 @@ return reverse; } + /** + * @deprecated use {@link #getComparatorSource()} + */ public SortComparatorSource getFactory() { return factory; } + + public FieldComparatorSource getComparatorSource() { + return comparatorSource; + } + + /** + * Use legacy IndexSearch implementation: search with a MultiSegmentReader rather + * than passing a single hit collector to multiple SegmentReaders. 
+ * + * @param legacy true for legacy behavior + * @deprecated will be removed in Lucene 3.0. + */ + public void setUseLegacySearch(boolean legacy) { + this.useLegacy = legacy; + } + + /** + * @return if true, IndexSearch will use legacy sorting search implementation. + * eg. multiple Priority Queues. + * @deprecated will be removed in Lucene 3.0. + */ + public boolean getUseLegacySearch() { + return this.useLegacy; + } public String toString() { StringBuffer buffer = new StringBuffer(); switch (type) { - case SCORE: buffer.append(""); - break; + case SCORE: + buffer.append(""); + break; - case DOC: buffer.append(""); - break; + case DOC: + buffer.append(""); + break; + case AUTO: + buffer.append(""); + break; + + case STRING: + buffer.append(""); + break; + + case STRING_VAL: + buffer.append(""); + break; + + case BYTE: + buffer.append(""); + break; + + case SHORT: + buffer.append(""); + break; + + case INT: + buffer.append(""); + break; + + case LONG: + buffer.append(""); + break; + + case FLOAT: + buffer.append(""); + break; + + case DOUBLE: + buffer.append(""); + break; + case CUSTOM: - buffer.append("'); - break; + buffer.append("'); + break; default: - buffer.append('\"').append(field).append('\"'); - break; + buffer.append(""); + break; } if (locale != null) buffer.append('(').append(locale).append(')'); @@ -333,4 +431,65 @@ if (parser != null) hash += parser.hashCode()^0x3aaf56ff; return hash; } + + + /** Returns the {@link FieldComparator} to use for sorting. + * @param subReaders array of {@link IndexReader} search + * will step through + * @param numHits number of top hits the queue will store + * @param sortPos position of this SortField within {@link + * Sort}. The comparator is primary if sortPos==0, + * secondary if sortPos==1, etc. Some comparators can + * optimize themselves when they are the primary sort. 
+ * @param reversed True if the SortField is reversed + * @return {@link FieldComparator} to use when sorting + */ + protected FieldComparator getComparator(final IndexReader[] subReaders, final int numHits, final int sortPos, final boolean reversed) throws IOException { + + if (locale != null) { + // TODO: it'd be nice to allow FieldCache.getStringIndex + // to optionally accept a Locale so sorting could then use + // the faster StringComparator impls + return new FieldComparator.StringComparatorLocale(numHits, field, locale); + } + + switch (type) { + case SortField.SCORE: + return new FieldComparator.RelevanceComparator(numHits); + + case SortField.DOC: + return new FieldComparator.DocComparator(numHits); + + case SortField.INT: + return new FieldComparator.IntComparator(numHits, field, parser); + + case SortField.FLOAT: + return new FieldComparator.FloatComparator(numHits, field, parser); + + case SortField.LONG: + return new FieldComparator.LongComparator(numHits, field, parser); + + case SortField.DOUBLE: + return new FieldComparator.DoubleComparator(numHits, field, parser); + + case SortField.BYTE: + return new FieldComparator.ByteComparator(numHits, field, parser); + + case SortField.SHORT: + return new FieldComparator.ShortComparator(numHits, field, parser); + + case SortField.CUSTOM: + assert factory == null && comparatorSource != null; + return comparatorSource.newComparator(field, subReaders, numHits, sortPos, reversed); + + case SortField.STRING: + return new FieldComparator.StringOrdValComparator(numHits, field, sortPos, reversed); + + case SortField.STRING_VAL: + return new FieldComparator.StringValComparator(numHits, field); + + default: + throw new IllegalStateException("Illegal sort type: " + type); + } + } } Index: src/java/org/apache/lucene/search/BooleanScorer.java =================================================================== --- src/java/org/apache/lucene/search/BooleanScorer.java (revision 735652) +++ src/java/org/apache/lucene/search/BooleanScorer.java (working copy) @@ -19,6 +19,7 @@ import java.io.IOException; +import org.apache.lucene.index.IndexReader; /* Description from Doug Cutting (excerpted from * LUCENE-1483): * @@ -79,11 +80,11 @@ public boolean done; public boolean required = false; public boolean prohibited = false; - public HitCollector collector; + public MultiReaderHitCollector collector; public SubScorer next; public SubScorer(Scorer scorer, boolean required, boolean prohibited, - HitCollector collector, SubScorer next) + MultiReaderHitCollector collector, SubScorer next) throws IOException { this.scorer = scorer; this.done = !scorer.next(); @@ -248,12 +249,12 @@ public final int size() { return SIZE; } - public HitCollector newCollector(int mask) { + public MultiReaderHitCollector newCollector(int mask) { return new Collector(mask, this); } } - static final class Collector extends HitCollector { + static final class Collector extends MultiReaderHitCollector { private BucketTable bucketTable; private int mask; public Collector(int mask, BucketTable bucketTable) { @@ -281,6 +282,9 @@ bucket.coord++; // increment coord } } + public void setNextReader(IndexReader reader, int docBase) { + // not needed by this implementation + } } public boolean skipTo(int target) { Index: src/java/org/apache/lucene/search/MultiSearcher.java =================================================================== --- src/java/org/apache/lucene/search/MultiSearcher.java (revision 736348) +++ src/java/org/apache/lucene/search/MultiSearcher.java (working copy) @@ -21,6 
+21,7 @@ import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexReader; import java.io.IOException; import java.util.HashMap; @@ -258,12 +259,34 @@ final int start = starts[i]; - searchables[i].search(weight, filter, new HitCollector() { - public void collect(int doc, float score) { - results.collect(doc + start, score); - } - }); + final MultiReaderHitCollector hc; + if (results instanceof MultiReaderHitCollector) { + // results can shift + final MultiReaderHitCollector resultsMulti = (MultiReaderHitCollector) results; + hc = new MultiReaderHitCollector() { + public void collect(int doc, float score) { + resultsMulti.collect(doc, score); + } + public void setNextReader(IndexReader reader, int docBase) throws IOException { + resultsMulti.setNextReader(reader, start+docBase); + } + }; + } else { + // We must shift the docIDs + hc = new MultiReaderHitCollector() { + private int docBase; + public void collect(int doc, float score) { + results.collect(doc + docBase + start, score); + } + + public void setNextReader(IndexReader reader, int docBase) { + this.docBase = docBase; + } + }; + } + + searchables[i].search(weight, filter, hc); } } Index: src/java/org/apache/lucene/search/Hits.java =================================================================== --- src/java/org/apache/lucene/search/Hits.java (revision 735652) +++ src/java/org/apache/lucene/search/Hits.java (working copy) @@ -40,9 +40,9 @@ * (but n < {@link #length()}_at_start). * * @deprecated Hits will be removed in Lucene 3.0.

- * Instead e. g. {@link TopDocCollector} and {@link TopDocs} can be used:
+ * Instead, e.g., {@link TopScoreDocCollector} and {@link TopDocs} can be used:
*

- *   TopDocCollector collector = new TopDocCollector(hitsPerPage);
+ *   TopScoreDocCollector collector = new TopScoreDocCollector(hitsPerPage);
  *   searcher.search(query, collector);
  *   ScoreDoc[] hits = collector.topDocs().scoreDocs;
  *   for (int i = 0; i < hits.length; i++) {
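
For reference, a minimal sketch of the replacement pattern the deprecation note above points to. The class name and the searcher/query/hitsPerPage parameters are illustrative only and are not part of this patch.

    import java.io.IOException;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopScoreDocCollector;

    // Hypothetical helper: collect the top hits with TopScoreDocCollector
    // instead of the deprecated Hits / TopDocCollector.
    class TopHitsExample {
      static ScoreDoc[] topHits(IndexSearcher searcher, Query query, int hitsPerPage)
          throws IOException {
        TopScoreDocCollector collector = new TopScoreDocCollector(hitsPerPage);
        searcher.search(query, collector);         // the collector re-bases per-segment docIDs itself
        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        for (int i = 0; i < hits.length; i++) {
          Document d = searcher.doc(hits[i].doc);  // hits[i].doc is already an absolute docID
        }
        return hits;
      }
    }

collector.getTotalHits() still reports the total number of matches, roughly what Hits.length() used to provide.
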
Index: src/java/org/apache/lucene/search/QueryWrapperFilter.java
===================================================================
--- src/java/org/apache/lucene/search/QueryWrapperFilter.java	(revision 735652)
+++ src/java/org/apache/lucene/search/QueryWrapperFilter.java	(working copy)
@@ -21,7 +21,6 @@
 import java.util.BitSet;
 
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.util.OpenBitSet;
 
 /** 
  * Constrains search results to only match those which also match a provided
@@ -51,10 +50,14 @@
   public BitSet bits(IndexReader reader) throws IOException {
     final BitSet bits = new BitSet(reader.maxDoc());
 
-    new IndexSearcher(reader).search(query, new HitCollector() {
+    new IndexSearcher(reader).search(query, new MultiReaderHitCollector() {
+      private int base = -1;
       public final void collect(int doc, float score) {
-        bits.set(doc);  // set bit for hit
+        bits.set(doc + base);  // set bit for hit
       }
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
     });
     return bits;
   }
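
Application code can follow the same re-basing idiom shown in the QueryWrapperFilter change above: remember the docBase handed to setNextReader and add it to every per-segment docID passed to collect. A minimal sketch under that assumption; the class name is hypothetical and not part of the patch.

    import java.io.IOException;
    import java.util.BitSet;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.MultiReaderHitCollector;

    // Hypothetical collector: records every matching docID in a BitSet,
    // re-basing the per-segment docIDs passed to collect().
    class MatchingDocsCollector extends MultiReaderHitCollector {
      private final BitSet bits;
      private int docBase;

      MatchingDocsCollector(int maxDoc) {
        bits = new BitSet(maxDoc);
      }

      public void setNextReader(IndexReader reader, int docBase) throws IOException {
        this.docBase = docBase;      // docIDs in collect() are relative to this sub-reader
      }

      public void collect(int doc, float score) {
        bits.set(docBase + doc);     // map back into the top-level docID space
      }

      BitSet getBits() {
        return bits;
      }
    }

Passing an instance to IndexSearcher.search(query, new MatchingDocsCollector(reader.maxDoc())) yields a BitSet indexed by top-level docID, the same result the anonymous collector above produces for QueryWrapperFilter.
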
Index: src/java/org/apache/lucene/search/TimeLimitedCollector.java
===================================================================
--- src/java/org/apache/lucene/search/TimeLimitedCollector.java	(revision 735652)
+++ src/java/org/apache/lucene/search/TimeLimitedCollector.java	(working copy)
@@ -1,5 +1,7 @@
 package org.apache.lucene.search;
 
+import org.apache.lucene.index.IndexReader;
+
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -17,13 +19,16 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 /**
  * The TimeLimitedCollector is used to timeout search requests that
  * take longer than the maximum allowed search time limit. After this
  * time is exceeded, the search thread is stopped by throwing a
  * TimeExceeded Exception.
+ * */ -public class TimeLimitedCollector extends HitCollector { +public class TimeLimitedCollector extends MultiReaderHitCollector { /** * Default timer resolution. @@ -138,7 +143,7 @@ private final long t0; private final long timeout; - private final HitCollector hc; + private final MultiReaderHitCollector hc; /** * Create a TimeLimitedCollector wrapper over another HitCollector with a specified timeout. @@ -146,7 +151,11 @@ * @param timeAllowed max time allowed for collecting hits after which {@link TimeExceededException} is thrown */ public TimeLimitedCollector( final HitCollector hc, final long timeAllowed ) { - this.hc = hc; + if (hc instanceof MultiReaderHitCollector) { + this.hc = (MultiReaderHitCollector) hc; + } else { + this.hc = new IndexSearcher.MultiReaderCollectorWrapper(hc); + } t0 = TIMER_THREAD.getMilliseconds(); this.timeout = t0 + timeAllowed; } @@ -216,4 +225,8 @@ public void setGreedy(boolean greedy) { this.greedy = greedy; } + + public void setNextReader(IndexReader reader, int base) throws IOException { + hc.setNextReader(reader, base); + } } Index: src/java/org/apache/lucene/search/TopFieldCollector.java =================================================================== --- src/java/org/apache/lucene/search/TopFieldCollector.java (revision 0) +++ src/java/org/apache/lucene/search/TopFieldCollector.java (revision 0) @@ -0,0 +1,221 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.FieldValueHitQueue.Entry; + +/** + * A {@link HitCollector} that sorts by {@link SortField} using + * {@link FieldComparator}s. + * + * NOTE: This API is experimental and might change in + * incompatible ways in the next release. + */ +public final class TopFieldCollector extends MultiReaderHitCollector { + + private final FieldValueHitQueue queue; + + private final FieldComparator[] comparators; + private FieldComparator comparator1; + private final int numComparators; + private int[] reverseMul; + private int reverseMul1 = 0; + + private final int numHits; + private int totalHits; + private FieldValueHitQueue.Entry bottom = null; + + /** Stores the maximum score value encountered, needed for normalizing. 
*/ + private float maxScore = Float.NEGATIVE_INFINITY; + + private boolean queueFull; + + private boolean fillFields; + + public TopFieldCollector(Sort sort, int numHits, IndexReader[] subReaders, boolean fillFields) + throws IOException { + + if (sort.fields.length == 0) { + throw new IllegalArgumentException("Sort must contain at least one field"); + } + + queue = new FieldValueHitQueue(sort.fields, numHits, subReaders); + comparators = queue.getComparators(); + reverseMul = queue.getReverseMul(); + numComparators = comparators.length; + + if (numComparators == 1) { + comparator1 = comparators[0]; + reverseMul1 = reverseMul[0]; + } else { + comparator1 = null; + reverseMul1 = 0; + } + this.numHits = numHits; + this.fillFields = fillFields; + } + + int currentDocBase; + + // javadoc inherited + public void setNextReader(IndexReader reader, int docBase) throws IOException { + final int numSlotsFull; + if (queueFull) + numSlotsFull = numHits; + else + numSlotsFull = totalHits; + + currentDocBase = docBase; + + for (int i = 0; i < numComparators; i++) { + comparators[i].setNextReader(reader, docBase, numSlotsFull); + } + } + + private final void updateBottom(int doc, float score) { + bottom.docID = currentDocBase + doc; + bottom.score = score; + queue.adjustTop(); + bottom = (FieldValueHitQueue.Entry) queue.top(); + } + + private final void add(int slot, int doc, float score) { + queue.put(new FieldValueHitQueue.Entry(slot, currentDocBase+doc, score)); + bottom = (FieldValueHitQueue.Entry) queue.top(); + queueFull = totalHits == numHits; + } + + // javadoc inherited + public void collect(int doc, float score) { + if (score > 0.0f) { + + maxScore = Math.max(maxScore, score); + totalHits++; + + // TODO: one optimization we could do is to pre-fill + // the queue with sentinel value that guaranteed to + // always compare lower than a real hit; this would + // save having to check queueFull on each insert + + if (queueFull) { + + if (numComparators == 1) { + // Common case + + // Fastmatch: return if this hit is not competitive + final int cmp = reverseMul1 * comparator1.compareBottom(doc, score); + if (cmp < 0) { + // Definitely not competitive + return; + } else if (cmp == 0 && doc + currentDocBase > bottom.docID) { + // Definitely not competitive + return; + } + + // This hit is competitive -- replace bottom + // element in queue & adjustTop + comparator1.copy(bottom.slot, doc, score); + + updateBottom(doc, score); + + comparator1.setBottom(bottom.slot); + + } else { + + // Fastmatch: return if this hit is not competitive + for(int i=0;;i++) { + final int c = reverseMul[i] * comparators[i].compareBottom(doc, score); + if (c < 0) { + // Definitely not competitive + return; + } else if (c > 0) { + // Definitely competitive + break; + } else if (i == numComparators-1) { + // This is the equals case. 
+ if (doc + currentDocBase > bottom.docID) { + // Definitely not competitive + return; + } else { + break; + } + } + } + + // This hit is competitive -- replace bottom + // element in queue & adjustTop + for (int i = 0; i < numComparators; i++) { + comparators[i].copy(bottom.slot, doc, score); + } + + updateBottom(doc, score); + + for(int i=0;i= 0; i--) { + scoreDocs[i] = queue.fillFields((FieldValueHitQueue.Entry) queue.pop()); + } + } else { + Entry entry = (FieldValueHitQueue.Entry) queue.pop(); + for (int i = queue.size() - 1; i >= 0; i--) { + scoreDocs[i] = new FieldDoc(entry.docID, + entry.score); + } + } + + return new TopFieldDocs(totalHits, scoreDocs, queue.getFields(), maxScore); + } +} Property changes on: src/java/org/apache/lucene/search/TopFieldCollector.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/ParallelMultiSearcher.java =================================================================== --- src/java/org/apache/lucene/search/ParallelMultiSearcher.java (revision 736355) +++ src/java/org/apache/lucene/search/ParallelMultiSearcher.java (working copy) @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.PriorityQueue; /** Implements parallel search over a set of Searchables. @@ -170,12 +171,34 @@ final int start = starts[i]; - searchables[i].search(weight, filter, new HitCollector() { - public void collect(int doc, float score) { - results.collect(doc + start, score); - } - }); + final MultiReaderHitCollector hc; + if (results instanceof MultiReaderHitCollector) { + // results can shift + final MultiReaderHitCollector resultsMulti = (MultiReaderHitCollector) results; + hc = new MultiReaderHitCollector() { + public void collect(int doc, float score) { + resultsMulti.collect(doc, score); + } + public void setNextReader(IndexReader reader, int docBase) throws IOException { + resultsMulti.setNextReader(reader, start+docBase); + } + }; + } else { + // We must shift the docIDs + hc = new MultiReaderHitCollector() { + private int docBase; + public void collect(int doc, float score) { + results.collect(doc + docBase + start, score); + } + + public void setNextReader(IndexReader reader, int docBase) { + this.docBase = docBase; + } + }; + } + + searchables[i].search(weight, filter, hc); } } Index: src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- src/java/org/apache/lucene/search/IndexSearcher.java (revision 735652) +++ src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -17,6 +17,10 @@ * limitations under the License. */ +import java.io.IOException; +import java.util.List; +import java.util.ArrayList; +import org.apache.lucene.util.SorterTemplate; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.CorruptIndexException; @@ -24,8 +28,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import java.io.IOException; - /** Implements search over a single IndexReader. * *

Applications usually need only call the inherited {@link #search(Query)} @@ -38,6 +40,8 @@ public class IndexSearcher extends Searcher { IndexReader reader; private boolean closeReader; + private IndexReader[] sortedSubReaders; + private int[] sortedStarts; /** Creates a searcher searching the index in the named directory. * @throws CorruptIndexException if the index is corrupt @@ -63,8 +67,65 @@ private IndexSearcher(IndexReader r, boolean closeReader) { reader = r; this.closeReader = closeReader; + sortSubReaders(); } + protected void gatherSubReaders(List allSubReaders, IndexReader r) { + IndexReader[] subReaders = r.getSequentialReaders(); + if (subReaders.length == 0) { + // Do nothing + } else if (subReaders.length == 1) { + // Add the one sub-reader and do not recurse + allSubReaders.add(subReaders[0]); + } else { + for(int i=0;i num2) + return -1; + if (num1 < num2) + return 1; + return 0; + } + protected void swap(int i, int j) { + IndexReader temp = sortedSubReaders[i]; + sortedSubReaders[i] = sortedSubReaders[j]; + sortedSubReaders[j] = temp; + + int tempInt = sortedStarts[i]; + sortedStarts[i] = sortedStarts[j]; + sortedStarts[j] = tempInt; + } + }; + sorter.quickSort(0, length - 1); + } + /** Return the {@link IndexReader} this searches. */ public IndexReader getIndexReader() { return reader; @@ -108,7 +169,7 @@ if (nDocs <= 0) // null might be returned from hq.top() below. throw new IllegalArgumentException("nDocs must be > 0"); - TopDocCollector collector = new TopDocCollector(nDocs); + TopScoreDocCollector collector = new TopScoreDocCollector(nDocs); search(weight, filter, collector); return collector.topDocs(); } @@ -117,17 +178,74 @@ public TopFieldDocs search(Weight weight, Filter filter, final int nDocs, Sort sort) throws IOException { + return search(weight, filter, nDocs, sort, true); + } + + /** + * Just like {@link #search(Weight, Filter, int, Sort)}, + * but you choose whether or not the fields in the + * returned {@link FieldDoc} instances should be set by + * specifying fillFields. 
+ */ + public TopFieldDocs search(Weight weight, Filter filter, final int nDocs, + Sort sort, boolean fillFields) + throws IOException { + + SortField[] fields = sort.fields; + boolean legacy = false; + for(int i = 0; i < fields.length; i++) { + SortField field = fields[i]; + String fieldname = field.getField(); + int type = field.getType(); + // Resolve AUTO into its true type + if (type == SortField.AUTO) { + int autotype = FieldValueHitQueue.detectFieldType(reader, fieldname); + if (autotype == SortField.STRING) { + fields[i] = new SortField (fieldname, field.getLocale(), field.getReverse()); + } else { + fields[i] = new SortField (fieldname, autotype, field.getReverse()); + } + } - TopFieldDocCollector collector = - new TopFieldDocCollector(reader, sort, nDocs); - search(weight, filter, collector); - return (TopFieldDocs)collector.topDocs(); + if (field.getUseLegacySearch()) { + legacy = true; + } + } + + if (legacy) { + // Search the single top-level reader + TopScoreDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs); + collector.setNextReader(reader, 0); + doSearch(reader, weight, filter, collector); + return (TopFieldDocs) collector.topDocs(); + } else { + // Search each sub-reader + TopFieldCollector collector = new TopFieldCollector(sort, nDocs, sortedSubReaders, fillFields); + search(weight, filter, collector); + return (TopFieldDocs) collector.topDocs(); + } } // inherit javadoc - public void search(Weight weight, Filter filter, - final HitCollector results) throws IOException { + public void search(Weight weight, Filter filter, HitCollector results) + throws IOException { + final MultiReaderHitCollector collector; + if (results instanceof MultiReaderHitCollector) { + collector = (MultiReaderHitCollector) results; + } else { + collector = new MultiReaderCollectorWrapper(results); + } + + for (int i = 0; i < sortedSubReaders.length; i++) { // search each subreader + collector.setNextReader(sortedSubReaders[i], sortedStarts[i]); + doSearch(sortedSubReaders[i], weight, filter, collector); + } + } + + private void doSearch(IndexReader reader, Weight weight, Filter filter, + final HitCollector results) throws IOException { + Scorer scorer = weight.scorer(reader); if (scorer == null) return; @@ -169,4 +287,26 @@ public Explanation explain(Weight weight, int doc) throws IOException { return weight.explain(reader, doc); } + + /** + * Wrapper for non expert ({@link HitCollector}) + * implementations, which simply re-bases the incoming + * docID before calling {@link HitCollector#collect}. + */ + static class MultiReaderCollectorWrapper extends MultiReaderHitCollector { + private HitCollector collector; + private int base = -1; + + public MultiReaderCollectorWrapper(HitCollector collector) { + this.collector = collector; + } + + public void collect(int doc, float score) { + collector.collect(doc + base, score); + } + + public void setNextReader(IndexReader reader, int docBase) { + base = docBase; + } + } } Index: src/java/org/apache/lucene/search/Hit.java =================================================================== --- src/java/org/apache/lucene/search/Hit.java (revision 735652) +++ src/java/org/apache/lucene/search/Hit.java (working copy) @@ -26,7 +26,7 @@ * Wrapper used by {@link HitIterator} to provide a lazily loaded hit * from {@link Hits}. * - * @deprecated Hits will be removed in Lucene 3.0. Use {@link TopDocCollector} and {@link TopDocs} instead. + * @deprecated Hits will be removed in Lucene 3.0. 
Use {@link TopScoreDocCollector} and {@link TopDocs} instead. */ public class Hit implements java.io.Serializable { Index: src/java/org/apache/lucene/search/FieldComparatorSource.java =================================================================== --- src/java/org/apache/lucene/search/FieldComparatorSource.java (revision 0) +++ src/java/org/apache/lucene/search/FieldComparatorSource.java (revision 0) @@ -0,0 +1,43 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import org.apache.lucene.index.IndexReader; + +/** + * Provides a {@link FieldComparator} for custom field sorting. + * + * NOTE: This API is experimental and might change in + * incompatible ways in the next release. + * + */ +public abstract class FieldComparatorSource { + + /** + * Creates a comparator for the field in the given index. + * + * @param fieldname + * Name of the field to create comparator for. + * @return FieldComparator. + * @throws IOException + * If an error occurs reading the index. 
+ */ + abstract FieldComparator newComparator(String fieldname, IndexReader[] subReaders, int numHits, int sortPos, boolean reversed) + throws IOException; +} Property changes on: src/java/org/apache/lucene/search/FieldComparatorSource.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- src/java/org/apache/lucene/index/MultiReader.java (revision 735652) +++ src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -364,6 +364,10 @@ throw new UnsupportedOperationException("MultiReader does not support this method."); } + public IndexReader[] getSequentialReaders() { + return subReaders; + } + // for testing IndexReader[] getSubReaders() { return subReaders; Index: src/java/org/apache/lucene/index/FilterIndexReader.java =================================================================== --- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 735652) +++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy) @@ -227,4 +227,8 @@ ensureOpen(); return in.isOptimized(); } + + public IndexReader[] getSequentialReaders() { + return in.getSequentialReaders(); + } } Index: src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- src/java/org/apache/lucene/index/IndexReader.java (revision 735652) +++ src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -1134,4 +1134,13 @@ public static Collection listCommits(Directory dir) throws IOException { return DirectoryIndexReader.listCommits(dir); } + + /** Returns the sequential readers that this reader is + * logically composed of. IndexSearcher uses this API to + * drive searching by one child reader at a time. If + * a reader is not composed of sequential child + * readers it should return a length-1 array of itself. */ + public IndexReader[] getSequentialReaders() { + return new IndexReader[] {this}; + } } Index: src/java/org/apache/lucene/index/MultiSegmentReader.java =================================================================== --- src/java/org/apache/lucene/index/MultiSegmentReader.java (revision 735652) +++ src/java/org/apache/lucene/index/MultiSegmentReader.java (working copy) @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -432,6 +431,10 @@ return fieldSet; } + public IndexReader[] getSequentialReaders() { + return subReaders; + } + // for testing SegmentReader[] getSubReaders() { return subReaders; Index: src/java/org/apache/lucene/util/SorterTemplate.java =================================================================== --- src/java/org/apache/lucene/util/SorterTemplate.java (revision 0) +++ src/java/org/apache/lucene/util/SorterTemplate.java (revision 0) @@ -0,0 +1,176 @@ +package org.apache.lucene.util; + +/* + * Copyright 2003 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Borrowed from Cglib. Allows custom swap so that two arrays can be sorted + * at the same time. + */ +public abstract class SorterTemplate { + private static final int MERGESORT_THRESHOLD = 12; + private static final int QUICKSORT_THRESHOLD = 7; + + abstract protected void swap(int i, int j); + abstract protected int compare(int i, int j); + + public void quickSort(int lo, int hi) { + quickSortHelper(lo, hi); + insertionSort(lo, hi); + } + + private void quickSortHelper(int lo, int hi) { + for (;;) { + int diff = hi - lo; + if (diff <= QUICKSORT_THRESHOLD) { + break; + } + int i = (hi + lo) / 2; + if (compare(lo, i) > 0) { + swap(lo, i); + } + if (compare(lo, hi) > 0) { + swap(lo, hi); + } + if (compare(i, hi) > 0) { + swap(i, hi); + } + int j = hi - 1; + swap(i, j); + i = lo; + int v = j; + for (;;) { + while (compare(++i, v) < 0) { + /* nothing */; + } + while (compare(--j, v) > 0) { + /* nothing */; + } + if (j < i) { + break; + } + swap(i, j); + } + swap(i, hi - 1); + if (j - lo <= hi - i + 1) { + quickSortHelper(lo, j); + lo = i + 1; + } else { + quickSortHelper(i + 1, hi); + hi = j; + } + } + } + + private void insertionSort(int lo, int hi) { + for (int i = lo + 1 ; i <= hi; i++) { + for (int j = i; j > lo; j--) { + if (compare(j - 1, j) > 0) { + swap(j - 1, j); + } else { + break; + } + } + } + } + + protected void mergeSort(int lo, int hi) { + int diff = hi - lo; + if (diff <= MERGESORT_THRESHOLD) { + insertionSort(lo, hi); + return; + } + int mid = lo + diff / 2; + mergeSort(lo, mid); + mergeSort(mid, hi); + merge(lo, mid, hi, mid - lo, hi - mid); + } + + private void merge(int lo, int pivot, int hi, int len1, int len2) { + if (len1 == 0 || len2 == 0) { + return; + } + if (len1 + len2 == 2) { + if (compare(pivot, lo) < 0) { + swap(pivot, lo); + } + return; + } + int first_cut, second_cut; + int len11, len22; + if (len1 > len2) { + len11 = len1 / 2; + first_cut = lo + len11; + second_cut = lower(pivot, hi, first_cut); + len22 = second_cut - pivot; + } else { + len22 = len2 / 2; + second_cut = pivot + len22; + first_cut = upper(lo, pivot, second_cut); + len11 = first_cut - lo; + } + rotate(first_cut, pivot, second_cut); + int new_mid = first_cut + len22; + merge(lo, first_cut, new_mid, len11, len22); + merge(new_mid, second_cut, hi, len1 - len11, len2 - len22); + } + + private void rotate(int lo, int mid, int hi) { + int lot = lo; + int hit = mid - 1; + while (lot < hit) { + swap(lot++, hit--); + } + lot = mid; hit = hi - 1; + while (lot < hit) { + swap(lot++, hit--); + } + lot = lo; hit = hi - 1; + while (lot < hit) { + swap(lot++, hit--); + } + } + + private int lower(int lo, int hi, int val) { + int len = hi - lo; + while (len > 0) { + int half = len / 2; + int mid= lo + half; + if (compare(mid, val) < 0) { + lo = mid + 1; + len = len - half -1; + } else { + len = half; + } + } + return lo; + } + + private int upper(int lo, int hi, int val) { + int len = hi - lo; + while (len > 0) { + int half = len / 2; + int mid = lo + half; + if (compare(val, mid) < 0) { + len = half; + } else { + lo = mid + 1; + len = len - half -1; + } + } + return lo; + } +} 
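A minimal usage sketch for the SorterTemplate added above (illustrative only, not part of the patch; the class name ParallelSortExample and the sample arrays are assumptions): a subclass supplies compare() and swap(), so two parallel arrays can be reordered together, as the class comment describes. Note that quickSort() takes an inclusive upper bound.

    import org.apache.lucene.util.SorterTemplate;

    // Illustrative sketch only: sorts two parallel arrays by the values array.
    public class ParallelSortExample {
      public static void main(String[] args) {
        final int[] docs = {3, 1, 2};
        final String[] values = {"c", "a", "b"};
        new SorterTemplate() {
          protected int compare(int i, int j) {
            // The sort order is defined by the values array.
            return values[i].compareTo(values[j]);
          }
          protected void swap(int i, int j) {
            // Swap both arrays so their elements stay aligned.
            String v = values[i]; values[i] = values[j]; values[j] = v;
            int d = docs[i]; docs[i] = docs[j]; docs[j] = d;
          }
        }.quickSort(0, docs.length - 1);
        // values is now {"a", "b", "c"} and docs is {1, 2, 3}.
      }
    }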
Property changes on: src/java/org/apache/lucene/util/SorterTemplate.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/demo/org/apache/lucene/demo/SearchFiles.java =================================================================== --- src/demo/org/apache/lucene/demo/SearchFiles.java (revision 735652) +++ src/demo/org/apache/lucene/demo/SearchFiles.java (working copy) @@ -34,7 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Searcher; -import org.apache.lucene.search.TopDocCollector; +import org.apache.lucene.search.TopScoreDocCollector; /** Simple command-line based search demo. */ public class SearchFiles { @@ -193,7 +193,7 @@ int hitsPerPage, boolean raw, boolean interactive) throws IOException { // Collect enough docs to show 5 pages - TopDocCollector collector = new TopDocCollector(5 * hitsPerPage); + TopScoreDocCollector collector = new TopScoreDocCollector(5 * hitsPerPage); searcher.search(query, collector); ScoreDoc[] hits = collector.topDocs().scoreDocs; @@ -212,7 +212,7 @@ break; } - collector = new TopDocCollector(numTotalHits); + collector = new TopScoreDocCollector(numTotalHits); searcher.search(query, collector); hits = collector.topDocs().scoreDocs; } Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTask.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTask.java (revision 735652) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchWithSortTask.java (working copy) @@ -45,18 +45,24 @@ SortField[] sortFields = new SortField[fields.length]; for (int i = 0; i < fields.length; i++) { String field = fields[i]; - int index = field.lastIndexOf(":"); - String fieldName; - String typeString; - if (index != -1) { - fieldName = field.substring(0, index); - typeString = field.substring(1+index, field.length()); + SortField sortField0; + if (field.equals("doc")) { + sortField0 = SortField.FIELD_DOC; } else { - typeString = "auto"; - fieldName = field; + int index = field.lastIndexOf(":"); + String fieldName; + String typeString; + if (index != -1) { + fieldName = field.substring(0, index); + typeString = field.substring(1+index, field.length()); + } else { + typeString = "auto"; + fieldName = field; + } + int type = getType(typeString); + sortField0 = new SortField(fieldName, type); } - int type = getType(typeString); - sortFields[i] = new SortField(fieldName, type); + sortFields[i] = sortField0; } this.sort = new Sort(sortFields); } @@ -69,6 +75,8 @@ type = SortField.INT; } else if (typeString.equals("string")) { type = SortField.STRING; + } else if (typeString.equals("string_val")) { + type = SortField.STRING_VAL; } else { type = SortField.AUTO; } Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SortableSimpleDocMaker.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SortableSimpleDocMaker.java (revision 735652) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SortableSimpleDocMaker.java (working copy) @@ -6,20 +6,44 @@ import org.apache.lucene.benchmark.byTask.utils.Config; /** - * Adds fields appropriate for sorting. + * Adds fields appropriate for sorting: country, + * random_string and sort_field (int). 
* */ public class SortableSimpleDocMaker extends SimpleDocMaker { private int sortRange; + + private static String[] COUNTRIES = new String[] {"European Union", "United States", "Japan", "Germany", "China (PRC)", "United Kingdom", "France", "Italy", "Spain", "Canada", "Brazil", "Russia", "India", "South Korea", "Australia", "Mexico", "Netherlands", "Turkey", "Sweden", "Belgium", "Indonesia", "Switzerland", "Poland", "Norway", "Republic of China", "Saudi Arabia", "Austria", "Greece", "Denmark", "Iran", "South Africa", "Argentina", "Ireland", "Thailand", "Finland", "Venezuela", "Portugal", "Hong Kong", "United Arab Emirates", "Malaysia", "Czech Republic", "Colombia", "Nigeria", "Romania", "Chile", "Israel", "Singapore", "Philippines", "Pakistan", "Ukraine", "Hungary", "Algeria", "New Zealand", "Egypt", "Kuwait", "Peru", "Kazakhstan", "Slovakia", "Morocco", "Bangladesh", "Vietnam", "Qatar", "Angola", "Libya", "Iraq", "Croatia", "Luxembourg", "Sudan", "Slovenia", "Cuba", "Belarus", "Ecuador", "Serbia", "Oman", "Bulgaria", "Lithuania", "Syria", "Dominican Republic", "Tunisia", "Guatemala", "Azerbaijan", "Sri Lanka", "Kenya", "Latvia", "Turkmenistan", "Costa Rica", "Lebanon", "Uruguay", "Uzbekistan", "Yemen", "Cyprus", "Estonia", "Trinidad and Tobago", "Cameroon", "El Salvador", "Iceland", "Panama", "Bahrain", "Ivory Coast", "Ethiopia", "Tanzania", "Jordan", "Ghana", "Bosnia and Herzegovina", "Macau", "Burma", "Bolivia", "Brunei", "Botswana", "Honduras", "Gabon", "Uganda", "Jamaica", "Zambia", "Senegal", "Paraguay", "Albania", "Equatorial Guinea", "Georgia", "Democratic Republic of the Congo", "Nepal", "Afghanistan", "Cambodia", "Armenia", "Republic of the Congo", "Mozambique", "Republic of Macedonia", "Malta", "Namibia", "Madagascar", "Chad", "Burkina Faso", "Mauritius", "Mali", "The Bahamas", "Papua New Guinea", "Nicaragua", "Haiti", "Benin", "West Bank and Gaza", "Jersey", "Fiji", "Guinea", "Moldova", "Niger", "Laos", "Mongolia", "French Polynesia", "Kyrgyzstan", "Barbados", "Tajikistan", "Malawi", "Liechtenstein", "New Caledonia", "Kosovo", "Rwanda", "Montenegro", "Swaziland", "Guam", "Mauritania", "Guernsey", "Isle of Man", "Togo", "Somalia", "Suriname", "Aruba", "North Korea", "Zimbabwe", "Central African Republic", "Faroe Islands", "Greenland", "Sierra Leone", "Lesotho", "Cape Verde", "Eritrea", "Bhutan", "Belize", "Antigua and Barbuda", "Gibraltar", "Maldives", "San Marino", "Guyana", "Burundi", "Saint Lucia", "Djibouti", "British Virgin Islands", "Liberia", "Seychelles", "The Gambia", "Northern Mariana Islands", "Grenada", "Saint Vincent and the Grenadines", "Saint Kitts and Nevis", "East Timor", "Vanuatu", "Comoros", "Samoa", "Solomon Islands", "Guinea-Bissau", "American Samoa", "Dominica", "Micronesia", "Tonga", "Cook Islands", "Palau", "Marshall Islands", "São Tomé and Príncipe", "Anguilla", "Kiribati", "Tuvalu", "Niue"}; protected DocData getNextDocData() throws NoMoreDataException { + Random r = new Random(); DocData doc = super.getNextDocData(); Properties props = new Properties(); - props.put("sort_field", Integer.toString(getRandomNumber(0, sortRange))); + + // random int + props.put("sort_field", Integer.toString(nextInt(r, sortRange))); + + // random string + int len = nextInt(r, 2, 20); + char[] buffer = new char[len]; + for(int i=0;i