Index: src/test/org/apache/lucene/analysis/TestToken.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestToken.java	(revision 705841)
+++ src/test/org/apache/lucene/analysis/TestToken.java	(working copy)
@@ -17,6 +17,13 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+
+import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestToken extends LuceneTestCase {
@@ -174,4 +181,59 @@
     Token copy = (Token) t.clone();
     assertNotSame(buf, copy.termBuffer());
   }
+  
+  public static abstract class TestNewAPITokenFilter extends TokenFilter {
+    public TestNewAPITokenFilter(TokenStream input) {
+      super(input);
+    }
+
+    private Token nextToken = new Token();
+    
+    public boolean incrementToken() throws IOException {
+      nextToken = next(nextToken);
+      if (nextToken == null) return false;
+    
+      incrementTokenUtil(reusableToken, nextToken);
+      return true;
+    }
+  }
+  
+  public static abstract class TestNewAPITokenStream extends TokenStream {
+    private Token nextToken = new Token();
+       
+    public boolean incrementToken() throws IOException {
+      nextToken = next(nextToken);
+      if (nextToken == null) return false;
+    
+      incrementTokenUtil(reusableToken, nextToken);
+      return true;
+    }
+  }
+
+  static void incrementTokenUtil(Token reusableToken, Token nextToken) throws IOException {
+    reusableToken.setTermBuffer(nextToken.termBuffer(), 0, nextToken.termLength());
+    
+    if (reusableToken.hasAttribute(PayloadAttribute.class)) {
+      PayloadAttribute att = (PayloadAttribute) reusableToken.getAttribute(PayloadAttribute.class);
+      att.setPayload(nextToken.getPayload());
+    }
+    if (reusableToken.hasAttribute(OffsetAttribute.class)) {
+      OffsetAttribute att = (OffsetAttribute) reusableToken.getAttribute(OffsetAttribute.class);
+      att.setStartOffset(nextToken.startOffset());
+      att.setEndOffset(nextToken.endOffset());
+    }
+    if (reusableToken.hasAttribute(TypeAttribute.class)) {
+      TypeAttribute att = (TypeAttribute) reusableToken.getAttribute(TypeAttribute.class);
+      att.setType(nextToken.type());
+    }
+    if (reusableToken.hasAttribute(FlagsAttribute.class)) {
+      FlagsAttribute att = (FlagsAttribute) reusableToken.getAttribute(FlagsAttribute.class);
+      att.setFlags(nextToken.getFlags());
+    }
+    if (reusableToken.hasAttribute(PositionIncrementAttribute.class)) {
+      PositionIncrementAttribute att = (PositionIncrementAttribute) reusableToken.getAttribute(PositionIncrementAttribute.class);
+      att.setPositionIncrement(nextToken.getPositionIncrement());
+    }
+    
+  }
 }
Index: src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java	(revision 705841)
+++ src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java	(working copy)
@@ -39,7 +39,7 @@
     Directory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
-    TokenStream stream = new TokenStream() {
+    TokenStream stream = new TestToken.TestNewAPITokenStream() {
       private int index = 0;
       
       public Token next(final Token reusableToken) throws IOException {
@@ -92,11 +92,20 @@
   
   private void checkTokens(TokenStream stream) throws IOException {
     int count = 0;
-    final Token reusableToken = new Token();
-    for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
-      assertTrue(count < tokens.length);
-      assertEquals(tokens[count], nextToken.term());
-      count++;
+    if (TokenStream.isUseNewAPI()) {
+      final Token reusableToken = stream.getToken();
+      while (stream.incrementToken()) {
+        assertTrue(count < tokens.length);
+        assertEquals(tokens[count], reusableToken.term());
+        count++;        
+      }
+    } else {
+      final Token reusableToken = new Token();
+      for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
+        assertTrue(count < tokens.length);
+        assertEquals(tokens[count], nextToken.term());
+        count++;
+      }
     }
     
     assertEquals(tokens.length, count);
Index: src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java	(revision 705841)
+++ src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java	(working copy)
@@ -1,6 +1,9 @@
 package org.apache.lucene.analysis;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.util.LuceneTestCase;
 
 import java.io.StringReader;
@@ -35,19 +38,25 @@
 
   public void assertAnalyzesTo(Analyzer a, String input, String[] expectedImages, String[] expectedTypes, int[] expectedPosIncrs) throws Exception {
     TokenStream ts = a.tokenStream("dummy", new StringReader(input));
-    final Token reusableToken = new Token();
+    final Token reusableToken = ts.getToken();
+    // TODO Java 1.5
+    //final TypeAttribute typeAtt = reusableToken.getAttribute(TypeAttribute.class);
+    //final PositionIncrementAttribute posIncrAtt = reusableToken.getAttribute(PositionIncrementAttribute.class);
+
+    final TypeAttribute typeAtt = (TypeAttribute) reusableToken.getAttribute(TypeAttribute.class);
+    final PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) reusableToken.getAttribute(PositionIncrementAttribute.class);
+    
     for (int i = 0; i < expectedImages.length; i++) {
-      Token nextToken = ts.next(reusableToken);
-      assertNotNull(nextToken);
-      assertEquals(expectedImages[i], nextToken.term());
+      assertTrue(ts.incrementToken());
+      assertEquals(expectedImages[i], new String(reusableToken.termBuffer(), 0, reusableToken.termLength()));
       if (expectedTypes != null) {
-        assertEquals(expectedTypes[i], nextToken.type());
+        assertEquals(expectedTypes[i], typeAtt.type());
       }
       if (expectedPosIncrs != null) {
-        assertEquals(expectedPosIncrs[i], nextToken.getPositionIncrement());
+        assertEquals(expectedPosIncrs[i], posIncrAtt.getPositionIncrement());
       }
     }
-    assertNull(ts.next(reusableToken));
+    assertFalse(ts.incrementToken());
     ts.close();
   }
 
Index: src/test/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- src/test/org/apache/lucene/search/TestPositionIncrement.java	(revision 705841)
+++ src/test/org/apache/lucene/search/TestPositionIncrement.java	(working copy)
@@ -22,9 +22,11 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -44,11 +46,15 @@
   public void testSetPosition() throws Exception {
     Analyzer analyzer = new Analyzer() {
       public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new TokenStream() {
+        return new TestToken.TestNewAPITokenStream() {
           private final String[] TOKENS = {"1", "2", "3", "4", "5"};
           private final int[] INCREMENTS = {1, 2, 1, 0, 1};
           private int i = 0;
 
+          public void addTokenAttributes() {
+            reusableToken.addAttribute(PositionIncrementAttribute.class);
+          }
+          
           public Token next(final Token reusableToken) {
             assert reusableToken != null;
             if (i == TOKENS.length)
Index: src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java	(revision 705841)
+++ src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java	(working copy)
@@ -21,9 +21,11 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -63,7 +65,7 @@
     }
   }
 
-  private class PayloadFilter extends TokenFilter {
+  private class PayloadFilter extends TestToken.TestNewAPITokenFilter {
     String fieldName;
     int numSeen = 0;
 
@@ -71,6 +73,10 @@
       super(input);
       this.fieldName = fieldName;
     }
+    
+    public void addTokenAttributes() {
+      reusableToken.addAttribute(PayloadAttribute.class);
+    }
 
     public Token next(final Token reusableToken) throws IOException {
       assert reusableToken != null;
Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java	(working copy)
@@ -386,7 +386,8 @@
           for(int k=0;k<pos1.length;k++) {
             assertEquals(pos1[k], pos2[k]);
             if (offsets1 != null) {
-              assertEquals(offsets1[k].getStartOffset(),
+              
+              assertEquals(offsets1[k].getStartOffset() + " != " + offsets2[k].getStartOffset(), offsets1[k].getStartOffset(),
                            offsets2[k].getStartOffset());
               assertEquals(offsets1[k].getEndOffset(),
                            offsets2[k].getEndOffset());
Index: src/test/org/apache/lucene/index/TestTermVectorsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestTermVectorsReader.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestTermVectorsReader.java	(working copy)
@@ -20,6 +20,8 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.MockRAMDirectory;
@@ -116,8 +118,14 @@
     fieldInfos = new FieldInfos(dir, seg + "." + IndexFileNames.FIELD_INFOS_EXTENSION);
   }
 
-  private class MyTokenStream extends TokenStream {
+  private class MyTokenStream extends org.apache.lucene.analysis.TestToken.TestNewAPITokenStream {
     int tokenUpto;
+    
+    public void addTokenAttributes() {
+      reusableToken.addAttribute(PositionIncrementAttribute.class);
+      reusableToken.addAttribute(OffsetAttribute.class);
+    }
+    
     public Token next(final Token reusableToken) {
       if (tokenUpto >= tokens.length)
         return null;
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -28,6 +28,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.UnicodeUtil;
 
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.Analyzer;
@@ -36,6 +37,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -1788,7 +1790,7 @@
     IndexWriter writer = new IndexWriter(dir, new Analyzer() {
 
       public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new TokenFilter(new StandardTokenizer(reader)) {
+        return new TestToken.TestNewAPITokenFilter(new StandardTokenizer(reader)) {
           private int count = 0;
 
           public Token next(final Token reusableToken) throws IOException {
@@ -1905,7 +1907,7 @@
     reader.close();
   }
 
-  private class CrashingFilter extends TokenFilter {
+  private class CrashingFilter extends TestToken.TestNewAPITokenFilter {
     String fieldName;
     int count;
 
@@ -3575,21 +3577,40 @@
     }
   }
 
+  private static class MyAnalyzer extends Analyzer {
+
+    public TokenStream tokenStream(String fieldName, Reader reader) {
+      return new TestToken.TestNewAPITokenFilter(new WhitespaceTokenizer(reader)) {
+        public void addTokenAttributes() {
+          reusableToken.addAttribute(PositionIncrementAttribute.class);
+        }
+      };
+    }
+    
+  }
+  
   // LUCENE-1255
   public void testNegativePositions() throws Throwable {
-    SinkTokenizer tokens = new SinkTokenizer();
+    SinkTokenizer tokens = new SinkTokenizer() {
+      public void addTokenAttributes() {
+        reusableToken.addAttribute(PositionIncrementAttribute.class);
+      }
+    };
     Token t = new Token();
+    PositionIncrementAttribute att = (PositionIncrementAttribute) t.addAttribute(PositionIncrementAttribute.class);
     t.setTermBuffer("a");
     t.setPositionIncrement(0);
+    att.setPositionIncrement(0);
     tokens.add(t);
     t.setTermBuffer("b");
     t.setPositionIncrement(1);
+    att.setPositionIncrement(1);
     tokens.add(t);
     t.setTermBuffer("c");
     tokens.add(t);
 
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir, new MyAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", tokens));
     w.addDocument(doc);
Index: src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
===================================================================
--- src/test/org/apache/lucene/index/TestMultiLevelSkipList.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestMultiLevelSkipList.java	(working copy)
@@ -24,9 +24,11 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Index;
@@ -96,9 +98,13 @@
 
   }
 
-  private static class PayloadFilter extends TokenFilter {
+  private static class PayloadFilter extends TestToken.TestNewAPITokenFilter {
     static int count = 0;
     
+    public void addTokenAttributes() {
+      reusableToken.addAttribute(PayloadAttribute.class);
+    }
+    
     protected PayloadFilter(TokenStream input) {
       super(input);
     }
Index: src/test/org/apache/lucene/index/TestDocumentWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestDocumentWriter.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestDocumentWriter.java	(working copy)
@@ -22,12 +22,15 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
@@ -136,7 +139,7 @@
   public void testTokenReuse() throws IOException {
     Analyzer analyzer = new Analyzer() {
       public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new TokenFilter(new WhitespaceTokenizer(reader)) {
+        return new TestToken.TestNewAPITokenFilter(new WhitespaceTokenizer(reader)) {
           boolean first=true;
           Token buffered;
 
@@ -165,6 +168,15 @@
 
             return nextToken;
           }
+          
+          PayloadAttribute payloadAtt = null;
+          PositionIncrementAttribute posIncrAtt = null;
+          
+          public void addTokenAttributes() {
+            payloadAtt = (PayloadAttribute) reusableToken.addAttribute(PayloadAttribute.class);
+            posIncrAtt = (PositionIncrementAttribute) reusableToken.addAttribute(PositionIncrementAttribute.class);
+          }
+          
         };
       }
     };
@@ -197,7 +209,7 @@
     IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     
-    doc.add(new Field("preanalyzed", new TokenStream() {
+    doc.add(new Field("preanalyzed", new TestToken.TestNewAPITokenStream() {
       private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
       private int index = 0;
       
Index: src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- src/test/org/apache/lucene/index/TestPayloads.java	(revision 705841)
+++ src/test/org/apache/lucene/index/TestPayloads.java	(working copy)
@@ -31,11 +31,13 @@
 import org.apache.lucene.util.UnicodeUtil;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TestToken;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
@@ -437,12 +439,16 @@
     /**
      * This Filter adds payloads to the tokens.
      */
-    private static class PayloadFilter extends TokenFilter {
+    private static class PayloadFilter extends TestToken.TestNewAPITokenFilter {
         private byte[] data;
         private int length;
         private int offset;
         Payload payload = new Payload();
         
+        public void addTokenAttributes() {
+          reusableToken.addAttribute(PayloadAttribute.class);
+        }
+        
         public PayloadFilter(TokenStream in, byte[] data, int offset, int length) {
             super(in);
             this.data = data;
@@ -524,11 +530,16 @@
         assertEquals(pool.size(), numThreads);
     }
     
-    private static class PoolingPayloadTokenStream extends TokenStream {
+    private static class PoolingPayloadTokenStream extends TestToken.TestNewAPITokenStream {
         private byte[] payload;
         private boolean first;
         private ByteArrayPool pool;
         private String term;
+
+        public void addTokenAttributes() {
+          reusableToken.addAttribute(PayloadAttribute.class);
+        }
+        
         PoolingPayloadTokenStream(ByteArrayPool pool) {
             this.pool = pool;
             payload = pool.get();
Index: src/test/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCase.java	(revision 705841)
+++ src/test/org/apache/lucene/util/LuceneTestCase.java	(working copy)
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import junit.framework.TestCase;
 
@@ -42,6 +43,7 @@
 
   protected void setUp() throws Exception {
     ConcurrentMergeScheduler.setTestMode();
+    TokenStream.setUseNewAPI(false);
   }
 
   protected void tearDown() throws Exception {
Index: src/java/org/apache/lucene/analysis/SinkTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/SinkTokenizer.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/SinkTokenizer.java	(working copy)
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 
 
@@ -66,6 +67,24 @@
    * @return The next {@link org.apache.lucene.analysis.Token} in the Sink.
    * @throws IOException
    */
+  public boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    if (iter == null) iter = lst.iterator();
+    // Since this TokenStream can be reset we have to maintain the tokens as immutable
+    if (iter.hasNext()) {
+      Token nextToken = (Token) iter.next();
+      reusableToken.copyFrom(nextToken);
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * Returns the next token out of the list of cached tokens
+   * @return The next {@link org.apache.lucene.analysis.Token} in the Sink.
+   * @throws IOException
+   * @deprecated
+   */
   public Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     if (iter == null) iter = lst.iterator();
@@ -77,8 +96,6 @@
     return null;
   }
 
-
-
   /**
    * Override this method to cache only certain tokens, or new tokens based
    * on the old tokens.
Index: src/java/org/apache/lucene/analysis/CachingTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/CachingTokenFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/CachingTokenFilter.java	(working copy)
@@ -40,6 +40,26 @@
     super(input);
   }
   
+  public boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    if (cache == null) {
+      // fill cache lazily
+      cache = new LinkedList();
+      fillCache();
+      iterator = cache.iterator();
+    }
+    
+    if (!iterator.hasNext()) {
+      // the cache is exhausted, signal end of stream by returning false
+      return false;
+    }
+    // Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
+    Token t = (Token) iterator.next();
+    reusableToken.copyFrom(t);
+    return true;
+  }
+  
+  /** @deprecated */
   public Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     if (cache == null) {
@@ -60,10 +80,17 @@
   
   public void reset() throws IOException {
     if(cache != null) {
-    	iterator = cache.iterator();
+      iterator = cache.iterator();
     }
   }
   
+  private void fillCache() throws IOException {
+    while(input.incrementToken()) {
+      cache.add((Token)reusableToken.clone());
+    }
+  }
+  
+  /** @deprecated */
   private void fillCache(final Token reusableToken) throws IOException {
     for (Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
       cache.add(nextToken.clone());
Index: src/java/org/apache/lucene/analysis/CharTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharTokenizer.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/CharTokenizer.java	(working copy)
@@ -20,6 +20,8 @@
 import java.io.IOException;
 import java.io.Reader;
 
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+
 /** An abstract base class for simple, character-oriented tokenizers.*/
 public abstract class CharTokenizer extends Tokenizer {
   public CharTokenizer(Reader input) {
@@ -30,6 +32,8 @@
   private static final int MAX_WORD_LEN = 255;
   private static final int IO_BUFFER_SIZE = 4096;
   private final char[] ioBuffer = new char[IO_BUFFER_SIZE];
+  
+  private OffsetAttribute offsetAtt;
 
   /** Returns true iff a character should be included in a token.  This
    * tokenizer generates as tokens adjacent sequences of characters which
@@ -43,7 +47,59 @@
   protected char normalize(char c) {
     return c;
   }
+  
+  protected void addTokenAttributes() {
+    // TODO Java 1.5
+    //offsetAtt = reusableToken.addAttribute(OffsetAttribute.class);
+    
+    offsetAtt = (OffsetAttribute) reusableToken.addAttribute(OffsetAttribute.class);
+  }
 
+  public final boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    reusableToken.clear();
+    int length = 0;
+    int start = bufferIndex;
+    char[] buffer = reusableToken.termBuffer();
+    while (true) {
+
+      if (bufferIndex >= dataLen) {
+        offset += dataLen;
+        dataLen = input.read(ioBuffer);
+        if (dataLen == -1) {
+          if (length > 0)
+            break;
+          else
+            return false;
+        }
+        bufferIndex = 0;
+      }
+
+      final char c = ioBuffer[bufferIndex++];
+
+      if (isTokenChar(c)) {               // if it's a token char
+
+        if (length == 0)                 // start of token
+          start = offset + bufferIndex - 1;
+        else if (length == buffer.length)
+          buffer = reusableToken.resizeTermBuffer(1+length);
+
+        buffer[length++] = normalize(c); // buffer it, normalized
+
+        if (length == MAX_WORD_LEN)      // buffer overflow!
+          break;
+
+      } else if (length > 0)             // at non-Letter w/ chars
+        break;                           // return 'em
+    }
+
+    reusableToken.setTermLength(length);
+    offsetAtt.setStartOffset(start);
+    offsetAtt.setEndOffset(start+length);
+    return true;
+  }
+
+  /** @deprecated */
   public final Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     reusableToken.clear();
Index: src/java/org/apache/lucene/analysis/PorterStemFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/PorterStemFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/PorterStemFilter.java	(working copy)
@@ -45,6 +45,18 @@
     stemmer = new PorterStemmer();
   }
 
+  public final boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    
+    if (!input.incrementToken())
+      return false;
+
+    if (stemmer.stem(reusableToken.termBuffer(), 0, reusableToken.termLength()))
+      reusableToken.setTermBuffer(stemmer.getResultBuffer(), 0, stemmer.getResultLength());
+    return true;
+  }
+  
+  /** @deprecated */
   public final Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     Token nextToken = input.next(reusableToken);
Index: src/java/org/apache/lucene/analysis/KeywordTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/KeywordTokenizer.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/KeywordTokenizer.java	(working copy)
@@ -37,7 +37,28 @@
     super(input);
     this.done = false;
   }
+  
+  public boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    if (!done) {
+      done = true;
+      int upto = 0;
+      reusableToken.clear();
+      char[] buffer = reusableToken.termBuffer();
+      while (true) {
+        final int length = input.read(buffer, upto, buffer.length-upto);
+        if (length == -1) break;
+        upto += length;
+        if (upto == buffer.length)
+          buffer = reusableToken.resizeTermBuffer(1+buffer.length);
+      }
+      reusableToken.setTermLength(upto);
+      return true;
+    }
+    return false;
+  }
 
+  /** @deprecated */
   public Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     if (!done) {
Index: src/java/org/apache/lucene/analysis/standard/StandardFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/standard/StandardFilter.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 /** Normalizes tokens extracted with {@link StandardTokenizer}. */
 
@@ -34,10 +35,53 @@
   private static final String APOSTROPHE_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.APOSTROPHE];
   private static final String ACRONYM_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
 
+  // this filter uses the TypeAttribute
+  private TypeAttribute typeAtt;
+  
+  protected final void addTokenAttributes() {
+    // TODO Java 1.5
+    //typeAtt = reusableToken.addAttribute(TypeAttribute.class);
+    typeAtt = (TypeAttribute) reusableToken.addAttribute(TypeAttribute.class);
+  }
+  
   /** Returns the next token in the stream, or null at EOS.
    * <p>Removes <tt>'s</tt> from the end of words.
    * <p>Removes dots from acronyms.
    */
+  public final boolean incrementToken() throws java.io.IOException {
+    assert reusableToken != null;
+    if (!input.incrementToken()) {
+      return false;
+    }
+
+    char[] buffer = reusableToken.termBuffer();
+    final int bufferLength = reusableToken.termLength();
+    final String type = typeAtt.type();
+
+    if (type == APOSTROPHE_TYPE &&      // remove 's
+  bufferLength >= 2 &&
+        buffer[bufferLength-2] == '\'' &&
+        (buffer[bufferLength-1] == 's' || buffer[bufferLength-1] == 'S')) {
+      // Strip last 2 characters off
+      reusableToken.setTermLength(bufferLength - 2);
+    } else if (type == ACRONYM_TYPE) {      // remove dots
+      int upto = 0;
+      for(int i=0;i<bufferLength;i++) {
+        char c = buffer[i];
+        if (c != '.')
+          buffer[upto++] = c;
+      }
+      reusableToken.setTermLength(upto);
+    }
+
+    return true;
+  }
+  
+  /** Returns the next token in the stream, or null at EOS.
+   * <p>Removes <tt>'s</tt> from the end of words.
+   * <p>Removes dots from acronyms.
+   * @deprecated
+   */
   public final Token next(final Token reusableToken) throws java.io.IOException {
     assert reusableToken != null;
     Token nextToken = input.next(reusableToken);
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java	(working copy)
@@ -22,6 +22,9 @@
 
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 /** A grammar-based tokenizer constructed with JFlex
  *
@@ -127,11 +130,74 @@
     this.scanner = new StandardTokenizerImpl(input);
   }
 
+  // this tokenizer generates three attributes:
+  // offset, positionIncrement and type
+  private OffsetAttribute offsetAtt;
+  private PositionIncrementAttribute posIncrAtt;
+  private TypeAttribute typeAtt;
+
+  protected final void addTokenAttributes() {
+    // TODO Java 1.5
+//    offsetAtt = reusableToken.addAttribute(OffsetAttribute.class);
+//    posIncrAtt = reusableToken.addAttribute(PositionIncrementAttribute.class);
+//    typeAtt = reusableToken.addAttribute(TypeAttribute.class);
+    
+    offsetAtt = (OffsetAttribute) reusableToken.addAttribute(OffsetAttribute.class);
+    posIncrAtt = (PositionIncrementAttribute) reusableToken.addAttribute(PositionIncrementAttribute.class);
+    typeAtt = (TypeAttribute) reusableToken.addAttribute(TypeAttribute.class);
+    
+  }
+
   /*
    * (non-Javadoc)
    *
    * @see org.apache.lucene.analysis.TokenStream#next()
    */
+  public boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    int posIncr = 1;
+
+    while(true) {
+      int tokenType = scanner.getNextToken();
+
+      if (tokenType == StandardTokenizerImpl.YYEOF) {
+        return false;
+      }
+
+      if (scanner.yylength() <= maxTokenLength) {
+        reusableToken.clear();
+        posIncrAtt.setPositionIncrement(posIncr);
+        scanner.getText(reusableToken);
+        final int start = scanner.yychar();
+        offsetAtt.setStartOffset(start);
+        offsetAtt.setEndOffset(start+reusableToken.termLength());
+        // This 'if' should be removed in the next release. For now, it converts
+        // invalid acronyms to HOST. When removed, only the 'else' part should
+        // remain.
+        if (tokenType == StandardTokenizerImpl.ACRONYM_DEP) {
+          if (replaceInvalidAcronym) {
+            typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST]);
+            reusableToken.setTermLength(reusableToken.termLength() - 1); // remove extra '.'
+          } else {
+            typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM]);
+          }
+        } else {
+          typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[tokenType]);
+        }
+        return true;
+      } else
+        // When we skip a too-long term, we still increment the
+        // position increment
+        posIncr++;
+    }
+  }
+  
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.lucene.analysis.TokenStream#next()
+   */
+  /** @deprecated */
   public Token next(final Token reusableToken) throws IOException {
       assert reusableToken != null;
       int posIncr = 1;
Index: src/java/org/apache/lucene/analysis/Token.java
===================================================================
--- src/java/org/apache/lucene/analysis/Token.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/Token.java	(working copy)
@@ -17,6 +17,12 @@
  * limitations under the License.
  */
 
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.lucene.analysis.tokenattributes.Attribute;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.index.TermPositions;     // for javadoc
 import org.apache.lucene.util.ArrayUtil;
@@ -120,7 +126,104 @@
   public static final String DEFAULT_TYPE = "word";
 
   private static int MIN_BUFFER_SIZE = 10;
+  
+  private transient Map attributes;
 
+  public Attribute addAttribute(Class attClass) {
+    if (this.attributes == null) {
+      this.attributes = new HashMap();
+    }
+
+    Attribute att = (Attribute) attributes.get(attClass);
+    if (att == null) {
+      try {
+        att = (Attribute) attClass.newInstance();
+      } catch (InstantiationException e) {
+        throw new IllegalArgumentException("Could not instantiate class " + attClass);
+      } catch (IllegalAccessException e) {
+        throw new IllegalArgumentException("Could not instantiate class " + attClass);      
+      }
+      
+      attributes.put(attClass, att);
+    }
+    return att;
+  }
+
+  public boolean hasAttribute(Class attClass) {
+    if (attributes == null) return false;
+    return this.attributes.containsKey(attClass);
+  }
+
+  public Attribute getAttribute(Class attClass) {
+    if (attributes == null) {
+      throw new IllegalArgumentException("This token does not have the attribute '" + attClass + "'.");
+    }
+    Attribute att = (Attribute) this.attributes.get(attClass);
+    if (att == null) {
+      throw new IllegalArgumentException("This token does not have the attribute '" + attClass + "'.");
+    }
+
+    return att;
+  }
+
+  public void copyFrom(Token prototype) {
+    prototype.initTermBuffer();
+    setTermBuffer(prototype.termBuffer, 0, prototype.termLength);
+    if (attributes != null && prototype.attributes != null) {
+      Iterator it = prototype.attributes.entrySet().iterator();
+      while (it.hasNext()) {
+        Entry entry = (Entry) it.next();
+        getAttribute((Class) entry.getKey()).copyFrom((Attribute) entry.getValue());
+      }
+    }
+  }
+  
+  
+//  private transient Map<Class<? extends Attribute>, Attribute> attributes;  
+//  public <T extends Attribute> T addAttribute(Class<T> attClass) {
+//    if (this.attributes == null) {
+//      this.attributes = new HashMap<Class<? extends Attribute>, Attribute>();
+//    }
+//
+//    T att = (T) attributes.get(attClass);
+//    if (att == null) {
+//      try {
+//        att = attClass.newInstance();
+//      } catch (InstantiationException e) {
+//        throw new IllegalArgumentException("Could not instantiate class " + attClass);
+//      } catch (IllegalAccessException e) {
+//        throw new IllegalArgumentException("Could not instantiate class " + attClass);      
+//      }
+//      
+//      attributes.put(attClass, att);
+//    }
+//    return att;
+//  }
+//
+//  public boolean hasAttribute(Class<? extends Attribute> attClass) {
+//    return this.attributes.containsKey(attClass);
+//  }
+//
+//  public <T extends Attribute> T getAttribute(Class<T> attClass) {
+//    Attribute att = this.attributes.get(attClass);
+//    if (att == null) {
+//      throw new IllegalArgumentException("This token does not have the attribute '" + attClass + "'.");
+//    }
+//
+//    return (T) att;
+//  }
+//
+//  public void copyFrom(Token prototype) {
+//    prototype.initTermBuffer();
+//    setTermBuffer(prototype.termBuffer, 0, prototype.termLength);
+//    Iterator<Entry<Class<? extends Attribute>, Attribute>> it = prototype.attributes.entrySet().iterator();
+//    while (it.hasNext()) {
+//      Entry<Class<? extends Attribute>, Attribute> entry = it.next();
+//      getAttribute(entry.getKey()).copyFrom(entry.getValue());
+//    }
+//  }
+  
+
   /** @deprecated We will remove this when we remove the
    * deprecated APIs */
   private String termText;
@@ -615,6 +718,16 @@
       sb.append(",type=").append(type);
     if (positionIncrement != 1)
       sb.append(",posIncr=").append(positionIncrement);
+    
+    if (attributes != null) {
+      // TODO Java 1.5
+      //Iterator<Attribute> it = attributes.values().iterator();
+      Iterator it = attributes.values().iterator();
+      while (it.hasNext()) {
+        sb.append(',');
+        sb.append(it.next().toString());
+      }
+    }
     sb.append(')');
     return sb.toString();
   }
@@ -643,6 +756,27 @@
       if (payload != null) {
         t.setPayload((Payload) payload.clone());
       }
+      
+      if (attributes != null) {
+        t.attributes = new HashMap();
+        Iterator it = attributes.entrySet().iterator();
+        while (it.hasNext()) {
+          Entry entry = (Entry) it.next();
+          Attribute clone = (Attribute) ((Attribute) entry.getValue()).clone();
+          t.attributes.put(entry.getKey(), clone);
+        }
+      }
+
+//      TODO Java 1.5      
+//      if (attributes != null) {
+//        t.attributes = new HashMap<Class<? extends Attribute>, Attribute>();
+//        Iterator<Entry<Class<? extends Attribute>, Attribute>> it = attributes.entrySet().iterator();
+//        while (it.hasNext()) {
+//          Entry<Class<? extends Attribute>, Attribute> entry = it.next();
+//          Attribute clone = (Attribute) entry.getValue().clone();
+//          t.attributes.put(entry.getKey(), clone);
+//        }
+//      }
       return t;
     } catch (CloneNotSupportedException e) {
       throw new RuntimeException(e);  // shouldn't happen
@@ -684,6 +818,32 @@
         for(int i=0;i<termLength;i++)
           if (termBuffer[i] != other.termBuffer[i])
             return false;
+        
+        if (attributes != null) {
+          Iterator it = attributes.keySet().iterator();
+          while (it.hasNext()) {
+            Class attName = (Class) it.next();
+            
+            Attribute otherAtt = (Attribute) other.attributes.get(attName);
+            if (otherAtt == null || !otherAtt.equals(attributes.get(attName))) {
+              return false;
+            }
+          }
+        }
+
+//        TODO Java 1.5        
+//        if (attributes != null) {
+//          Iterator<Class<? extends Attribute>> it = attributes.keySet().iterator();
+//          while (it.hasNext()) {
+//            Class<? extends Attribute> attName = it.next();
+//            
+//            Attribute otherAtt = other.attributes.get(attName);
+//            if (otherAtt == null || !otherAtt.equals(attributes.get(attName))) {
+//              return false;
+//            }
+//          }
+//        }
+ 
         return true;
       } else
         return false;
@@ -708,6 +868,15 @@
     code = code * 31 + type.hashCode();
     code = (payload == null ? code : code * 31 + payload.hashCode());
     code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
+    if (attributes != null) {
+      // TODO: Java 1.5
+      //Iterator<Attribute> it = attributes.values().iterator();
+      Iterator it = attributes.values().iterator();
+      while (it.hasNext()) {
+        code = code * 31 + it.next().hashCode();
+      }
+    }
+    
     return code;
   }
       
Index: src/java/org/apache/lucene/analysis/TeeTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TeeTokenFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/TeeTokenFilter.java	(working copy)
@@ -62,6 +62,16 @@
     this.sink = sink;
   }
 
+  public boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    if (input.incrementToken()) {
+      sink.add(reusableToken);
+      return true;
+    }
+    return false;
+  }
+
+  /** @deprecated */
   public Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     Token nextToken = input.next(reusableToken);
Index: src/java/org/apache/lucene/analysis/TokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/TokenFilter.java	(working copy)
@@ -34,7 +34,16 @@
   protected TokenFilter(TokenStream input) {
     this.input = input;
   }
-
+  
+  public final Token getToken() throws IOException {
+    if (reusableToken == null) {
+      reusableToken = input.getToken();
+      addTokenAttributes();
+    }
+    
+    return reusableToken;
+  }
+  
   /** Close the input TokenStream. */
   public void close() throws IOException {
     input.close();
Index: src/java/org/apache/lucene/analysis/LengthFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/LengthFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/LengthFilter.java	(working copy)
@@ -40,9 +40,28 @@
     this.min = min;
     this.max = max;
   }
+  
+  /**
+   * Advances to the next token whose term length is within [min, max]; returns false at EOS
+   */
+  public final boolean incrementToken() throws IOException
+  {
+    assert reusableToken != null;
+    // skip tokens whose term length is outside [min, max]
+    while (input.incrementToken()) {
+      int len = reusableToken.termLength();
+      if (len >= min && len <= max) {
+          return true;
+      }
+      // note: else we ignore it but should we index each part of it?
+    }
+    // reached EOS -- return false
+    return false;
+  }
 
   /**
    * Returns the next input Token whose term() is the right len
+   * @deprecated
    */
   public final Token next(final Token reusableToken) throws IOException
   {
Index: src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java	(working copy)
@@ -32,6 +32,28 @@
   private char[] output = new char[256];
   private int outputPos;
 
+  public final boolean incrementToken() throws java.io.IOException {
+    assert reusableToken != null;
+    
+    if (input.incrementToken()) {
+      final char[] buffer = reusableToken.termBuffer();
+      final int length = reusableToken.termLength();
+      // If no characters actually require rewriting then we
+      // just return token as-is:
+      for(int i=0;i<length;i++) {
+        final char c = buffer[i];
+        if (c >= '\u00c0' && c <= '\uFB06') {
+          removeAccents(buffer, length);
+          reusableToken.setTermBuffer(output, 0, outputPos);
+          break;
+        }
+      }
+      return true;
+    } else
+      return false;
+  }
+  
+  /** @deprecated */
   public final Token next(final Token reusableToken) throws java.io.IOException {
     assert reusableToken != null;
     Token nextToken = input.next(reusableToken);
Index: src/java/org/apache/lucene/analysis/LowerCaseFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/LowerCaseFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/LowerCaseFilter.java	(working copy)
@@ -29,6 +29,22 @@
     super(in);
   }
 
+  public final boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    
+    if (input.incrementToken()) {
+
+      final char[] buffer = reusableToken.termBuffer();
+      final int length = reusableToken.termLength();
+      for(int i=0;i<length;i++)
+        buffer[i] = Character.toLowerCase(buffer[i]);
+
+      return true;
+    } else
+      return false;
+  }
+  
+  /** @deprecated */
   public final Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     Token nextToken = input.next(reusableToken);
Index: src/java/org/apache/lucene/analysis/StopFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopFilter.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/StopFilter.java	(working copy)
@@ -21,6 +21,8 @@
 import java.util.Arrays;
 import java.util.Set;
 
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+
 /**
  * Removes stop words from a token stream.
  */
@@ -109,10 +111,39 @@
     stopSet.addAll(Arrays.asList(stopWords));
     return stopSet;
   }
+  
+  private PositionIncrementAttribute posIncrAtt = null;
+  
+  protected final void addTokenAttributes() {
+    // TODO Java 1.5
+    //posIncrAtt = reusableToken.addAttribute(PositionIncrementAttribute.class);
+    posIncrAtt = (PositionIncrementAttribute) reusableToken.addAttribute(PositionIncrementAttribute.class);
+  }
 
   /**
    * Returns the next input Token whose term() is not a stop word.
    */
+  public final boolean incrementToken() throws IOException {
+    assert reusableToken != null;
+    // return the first non-stop word found
+    int skippedPositions = 0;
+    while (input.incrementToken()) {
+      if (!stopWords.contains(reusableToken.termBuffer(), 0, reusableToken.termLength())) {
+        if (enablePositionIncrements) {
+          posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skippedPositions);
+        }
+        return true;
+      }
+      skippedPositions += posIncrAtt.getPositionIncrement();
+    }
+    // reached EOS -- return false
+    return false;
+  }
+
+  /**
+   * Returns the next input Token whose term() is not a stop word.
+   * @deprecated
+   */
   public final Token next(final Token reusableToken) throws IOException {
     assert reusableToken != null;
     // return the first non-stop word found
Index: src/java/org/apache/lucene/analysis/TokenStream.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenStream.java	(revision 705841)
+++ src/java/org/apache/lucene/analysis/TokenStream.java	(working copy)
@@ -37,7 +37,21 @@
   */
 
 public abstract class TokenStream {
-
+  protected Token reusableToken = null;
+  
+  /** @deprecated */
+  private static boolean useNewAPI = false;
+  
+  /** @deprecated */
+  public static boolean isUseNewAPI() {
+    return useNewAPI;
+  }
+  
+  /** @deprecated */
+  public static void setUseNewAPI(boolean use) {
+    useNewAPI = use;
+  }
+  
   /** Returns the next token in the stream, or null at EOS.
    *  @deprecated The returned Token is a "full private copy" (not
    *  re-used across calls to next()) but will be slower
@@ -83,6 +97,9 @@
    *  return; this parameter should never be null (the callee
    *  is not required to check for null before using it, but it is a
    *  good idea to assert that it is not null.)
+   *  @deprecated This method will be removed in Lucene 3.0. Use the
+   *    new {@link #getToken()} and {@link #incrementToken()} APIs 
+   *    instead.
    *  @return next token in the stream or null if end-of-stream was hit
    */
   public Token next(final Token reusableToken) throws IOException {
@@ -90,6 +107,23 @@
     assert reusableToken != null;
     return next();
   }
+  
+  public Token getToken() throws IOException {
+    if (reusableToken == null) {
+      reusableToken = new Token();
+      addTokenAttributes();
+    }
+    
+    return reusableToken;
+  }
+  
+  protected void addTokenAttributes() {
+    // don't add any attributes by default
+  }
+  
+  public boolean incrementToken() throws IOException {
+    return false;
+  }
 
   /** Resets this stream to the beginning. This is an
    *  optional operation, so subclasses may or may not
Index: src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/DocInverterPerField.java	(working copy)
@@ -22,6 +22,8 @@
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 
 /**
  * Holds state for inverting all occurrences of a single
@@ -80,6 +82,10 @@
           String stringValue = field.stringValue();
           final int valueLength = stringValue.length();
           Token token = perThread.localToken.reinit(stringValue, fieldState.offset, fieldState.offset + valueLength);
+          if (TokenStream.isUseNewAPI()) {
+            token.addAttribute(OffsetAttribute.class); 
+            consumer.setToken(token);
+          }
           boolean success = false;
           try {
             consumer.add(token);
@@ -122,7 +128,25 @@
 
           try {
             int offsetEnd = fieldState.offset-1;
-            final Token localToken = perThread.localToken;
+            
+            boolean useNewTokenStreamAPI = TokenStream.isUseNewAPI();
+            Token localToken = null;
+            OffsetAttribute offsetAttribute = null;
+            PositionIncrementAttribute posIncrAttribute = null;
+            
+            if (useNewTokenStreamAPI) { 
+             localToken = stream.getToken();
+             if (localToken.hasAttribute(OffsetAttribute.class)) {
+               offsetAttribute = (OffsetAttribute) localToken.getAttribute(OffsetAttribute.class);
+             }
+             if (localToken.hasAttribute(PositionIncrementAttribute.class)) {
+               posIncrAttribute = (PositionIncrementAttribute) localToken.getAttribute(PositionIncrementAttribute.class);
+             }
+             consumer.setToken(localToken);
+            } else {
+              localToken = perThread.localToken;
+            }         
+            
             for(;;) {
 
               // If we hit an exception in stream.next below
@@ -131,10 +155,37 @@
               // non-aborting and (above) this one document
               // will be marked as deleted, but still
               // consume a docID
-              Token token = stream.next(localToken);
+              Token token;
+              if (useNewTokenStreamAPI) {
+                if (stream.incrementToken()) {
+                  token = localToken;
+                } else {
+                  token = null;
+                }
+              } else {
+                token = stream.next(localToken);
+              }
 
               if (token == null) break;
-              fieldState.position += (token.getPositionIncrement() - 1);
+              
+              int positionIncrement = 1;
+              int startOffset = 0;
+              int endOffset = 0;
+              if (useNewTokenStreamAPI) {
+                  if (posIncrAttribute != null) {
+                    positionIncrement = posIncrAttribute.getPositionIncrement();
+                  }
+                  if (offsetAttribute != null) {
+                    startOffset = offsetAttribute.startOffset();
+                    endOffset = offsetAttribute.endOffset();
+                  }
+              } else {
+                positionIncrement = token.getPositionIncrement();
+                startOffset = token.startOffset();
+                endOffset = token.endOffset();
+              }
+              
+              fieldState.position += (positionIncrement - 1);
               boolean success = false;
               try {
                 // If we hit an exception in here, we abort
@@ -150,7 +201,7 @@
                   docState.docWriter.setAborting();
               }
               fieldState.position++;
-              offsetEnd = fieldState.offset + token.endOffset();
+              offsetEnd = fieldState.offset + endOffset;
               if (++fieldState.length >= maxFieldLength) {
                 if (docState.infoStream != null)
                   docState.infoStream.println("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens");
Index: src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/TermsHashPerField.java	(working copy)
@@ -49,7 +49,7 @@
   private int postingsHashMask = postingsHashSize-1;
   private RawPostingList[] postingsHash = new RawPostingList[postingsHashSize];
   private RawPostingList p;
-
+  
   public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
     this.perThread = perThread;
     intPool = perThread.intPool;
@@ -247,6 +247,13 @@
   private boolean doCall;
   private boolean doNextCall;
 
+  void setToken(Token t) {
+    consumer.setToken(t);
+    if (nextPerField != null) {
+      nextPerField.setToken(t);
+    }
+  }
+  
   boolean start(Fieldable[] fields, int count) throws IOException {
     doCall = consumer.start(fields, count);
     if (nextPerField != null)
Index: src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(working copy)
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 
 // TODO: break into separate freq and prox writers as
 // codecs; make separate container (tii/tis/skip/*) that can
@@ -32,6 +33,9 @@
   final DocumentsWriter.DocState docState;
   final DocInverter.FieldInvertState fieldState;
   boolean omitTf;
+  
+  Token token;
+  PayloadAttribute payloadAttribute; 
 
   public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo) {
     this.termsHashPerField = termsHashPerField;
@@ -64,6 +68,8 @@
     // Record, up front, whether our in-RAM format will be
     // with or without term freqs:
     omitTf = fieldInfo.omitTf;
+    token = null;
+    payloadAttribute = null;
   }
 
   boolean start(Fieldable[] fields, int count) {
@@ -72,9 +78,28 @@
         return true;
     return false;
   }     
+  
+  void setToken(Token t) {
+    token = t;
+    if (token.hasAttribute(PayloadAttribute.class)) {
+      payloadAttribute = (PayloadAttribute) token.getAttribute(PayloadAttribute.class);
+    } else {
+      payloadAttribute = null;
+    }
+  }
 
   final void writeProx(Token t, FreqProxTermsWriter.PostingList p, int proxCode) {
-    final Payload payload = t.getPayload();    
+    final Payload payload;
+    if (token != null) {
+      if (payloadAttribute == null) {
+        payload = null;
+      } else {
+        payload = payloadAttribute.getPayload();
+      }
+    } else {
+      payload = t.getPayload();
+    }
+    
     if (payload != null && payload.length > 0) {
       termsHashPerField.writeVInt(1, (proxCode<<1)|1);
       termsHashPerField.writeVInt(1, payload.length);
Index: src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java
===================================================================
--- src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java	(working copy)
@@ -29,6 +29,8 @@
   // fields:
   abstract boolean start(Fieldable[] fields, int count) throws IOException;
 
+  abstract void setToken(Token token);
+  
   // Called once per inverted token
   abstract void add(Token token) throws IOException;
 
Index: src/java/org/apache/lucene/index/TermsHashConsumerPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashConsumerPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/TermsHashConsumerPerField.java	(working copy)
@@ -30,6 +30,7 @@
   abstract boolean start(Fieldable[] fields, int count) throws IOException;
   abstract void finish() throws IOException;
   abstract void skippingLongTerm(Token t) throws IOException;
+  abstract void setToken(Token t);
   abstract void newTerm(Token t, RawPostingList p) throws IOException;
   abstract void addTerm(Token t, RawPostingList p) throws IOException;
   abstract int getStreamCount();
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java	(revision 705841)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java	(working copy)
@@ -20,6 +20,8 @@
 import java.io.IOException;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.store.IndexOutput;
 
@@ -38,6 +40,9 @@
 
   int maxNumPostings;
 
+  Token token = null;
+  OffsetAttribute offsetAttribute = null;
+  
   public TermVectorsTermsWriterPerField(TermsHashPerField termsHashPerField, TermVectorsTermsWriterPerThread perThread, FieldInfo fieldInfo) {
     this.termsHashPerField = termsHashPerField;
     this.perThread = perThread;
@@ -191,6 +196,15 @@
     termsHashPerField.shrinkHash(maxNumPostings);
     maxNumPostings = 0;
   }
+  
+  void setToken(Token t) {
+    token = t;
+    if (doVectorOffsets && token.hasAttribute(OffsetAttribute.class)) {
+      offsetAttribute = (OffsetAttribute) token.getAttribute(OffsetAttribute.class);
+    } else {
+      offsetAttribute = null;
+    }
+  }
 
   void newTerm(Token t, RawPostingList p0) {
 
@@ -201,8 +215,17 @@
     p.freq = 1;
 
     if (doVectorOffsets) {
-      final int startOffset = fieldState.offset + t.startOffset();
-      final int endOffset = fieldState.offset + t.endOffset();
+      int startOffset = fieldState.offset;
+      int endOffset = fieldState.offset;
+      
+      if (offsetAttribute != null) {
+        startOffset += offsetAttribute.startOffset();
+        endOffset += offsetAttribute.endOffset();
+      } else {
+        startOffset += t.startOffset();
+        endOffset += t.endOffset();
+      }
+      
       termsHashPerField.writeVInt(1, startOffset);
       termsHashPerField.writeVInt(1, endOffset - startOffset);
       p.lastOffset = endOffset;
@@ -222,8 +245,16 @@
     p.freq++;
 
     if (doVectorOffsets) {
-      final int startOffset = fieldState.offset + t.startOffset();
-      final int endOffset = fieldState.offset + t.endOffset();
+      int startOffset = fieldState.offset;
+      int endOffset = fieldState.offset;
+      
+      if (offsetAttribute != null) {
+        startOffset += offsetAttribute.startOffset();
+        endOffset += offsetAttribute.endOffset();
+      } else {
+        startOffset += t.startOffset();
+        endOffset += t.endOffset();
+      }
       termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
       termsHashPerField.writeVInt(1, endOffset - startOffset);
       p.lastOffset = endOffset;
