Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (revision 1470386)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (working copy)
@@ -75,6 +75,7 @@
   private int tokEnd; // only used if the length changed before this filter
   private boolean hasIllegalOffsets; // only if the length changed before this filter
   private int savePosIncr;
+  private boolean isFirstToken = true;
 
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@@ -138,9 +139,8 @@
           savePosIncr = posIncrAtt.getPositionIncrement();
         }
       }
-      if (curGramSize <= maxGram) {
-        if (! (curGramSize > curTermLength      // if the remaining input is too short, we can't generate any n-grams
-            || curGramSize > maxGram)) {        // if we have hit the end of our n-gram size range, quit
+      if (curGramSize <= maxGram) {             // if we have hit the end of our n-gram size range, quit
+        if (curGramSize <= curTermLength) {     // if the remaining input is too short, we can't generate any n-grams
           // grab gramSize chars from front or back
           int start = side == Side.FRONT ? 0 : curTermLength - curGramSize;
           int end = start + curGramSize;
@@ -152,12 +152,16 @@
           }
           // first ngram gets increment, others don't
           if (curGramSize == minGram) {
-            posIncrAtt.setPositionIncrement(savePosIncr);
+            // Leave the first token position increment at the cleared-attribute value of 1
+            if ( ! isFirstToken) {
+              posIncrAtt.setPositionIncrement(savePosIncr);
+            }
           } else {
             posIncrAtt.setPositionIncrement(0);
           }
           termAtt.copyBuffer(curTermBuffer, start, curGramSize);
           curGramSize++;
+          isFirstToken = false;
           return true;
         }
       }
@@ -169,5 +173,6 @@
   public void reset() throws IOException {
     super.reset();
     curTermBuffer = null;
+    isFirstToken = true;
   }
 }
Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
===================================================================
--- lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (revision 1470386)
+++ lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (working copy)
@@ -26,6 +26,7 @@
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import org.apache.lucene.analysis.position.PositionFilter;
 
 import java.io.Reader;
 import java.io.StringReader;
@@ -120,6 +121,21 @@
         false);
   }
 
+  public void testFirstTokenPositionIncrement() throws Exception {
+    TokenStream ts = new MockTokenizer(new StringReader("a abc"), MockTokenizer.WHITESPACE, false);
+    ts = new PositionFilter(ts, 0); // All but first token will get 0 position increment
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
+    // The first token "a" will not be output, since it's smaller than the minGram size of 2.
+    // The second token on input to EdgeNGramTokenFilter will have a position increment of 0,
+    // which should be increased to 1, since this is the first output token in the stream.
+    assertTokenStreamContents(filter,
+        new String[] { "ab", "abc" },
+        new int[] { 2, 2 },
+        new int[] { 4, 5 },
+        new int[] { 1, 0 }
+    );
+  }
+
   public void testTokenizerPositions() throws Exception {
     EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
     assertTokenStreamContents(tokenizer,
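
Standalone usage sketch (not part of the patch): the snippet below drives the same analysis chain as testFirstTokenPositionIncrement() outside the test framework and prints each gram's position increment, making the fix directly observable. It is a minimal sketch assuming the Lucene 4.x analysis API at this revision; WhitespaceTokenizer (which takes a Version) stands in for the test-framework-only MockTokenizer, and the class name and Version constant are illustrative.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.position.PositionFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.Version;

public class EdgeNGramPosIncrDemo {
  public static void main(String[] args) throws Exception {
    // Same chain as the new test: whitespace tokens, all but the first
    // forced to a 0 position increment, then front edge n-grams of size 2-3.
    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_42, new StringReader("a abc"));
    ts = new PositionFilter(ts, 0);
    ts = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);

    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);

    ts.reset();
    while (ts.incrementToken()) {
      // With the patch this prints "ab posIncr=1" then "abc posIncr=0";
      // unpatched, the first gram would report posIncr=0 because the
      // saved increment from PositionFilter leaked through.
      System.out.println(termAtt + " posIncr=" + posIncrAtt.getPositionIncrement());
    }
    ts.end();
    ts.close();
  }
}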