Index: src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.java	(working copy)
@@ -17,10 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Serializable;
 
-import org.apache.lucene.util.Attribute;
-
 /**
  * This attribute can be used to pass different flags down the tokenizer chain,
  * e. g. from one TokenFilter to another one. 
@@ -31,9 +28,7 @@
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
 
  */
-public class FlagsAttribute extends Attribute implements Cloneable, Serializable {
-  private int flags = 0;
-  
+public interface FlagsAttribute extends TokenAttribute {
   /**
    * EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
    * <p/>
@@ -44,43 +39,10 @@
    *
    * @return The bits
    */
-  public int getFlags() {
-    return flags;
-  }
+  public int getFlags();
 
   /**
    * @see #getFlags()
    */
-  public void setFlags(int flags) {
-    this.flags = flags;
-  }
-  
-  public void clear() {
-    flags = 0;
-  }
-
-  public String toString() {
-    return "flags=" + flags;
-  }
-
-  public boolean equals(Object other) {
-    if (this == other) {
-      return true;
-    }
-    
-    if (other instanceof FlagsAttribute) {
-      return ((FlagsAttribute) other).flags == flags;
-    }
-    
-    return false;
-  }
-
-  public int hashCode() {
-    return flags;
-  }
-  
-  public void copyTo(Attribute target) {
-    FlagsAttribute t = (FlagsAttribute) target;
-    t.setFlags(flags);
-  }
+  public void setFlags(int flags);  
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java	(revision 0)
@@ -0,0 +1,82 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.util.AttributeImpl;
+
+/**
+ * This attribute can be used to pass different flags down the tokenizer chain,
+ * e. g. from one TokenFilter to another one. 
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+
+ */
+public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable, Serializable {
+  private int flags = 0;
+  
+  /**
+   * EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+   * <p/>
+   *
+   * Get the bitset for any bits that have been set.  This is completely distinct from {@link TypeAttribute#type()}, although they do share similar purposes.
+   * The flags can be used to encode information about the token for use by other {@link org.apache.lucene.analysis.TokenFilter}s.
+   *
+   *
+   * @return The bits
+   */
+  public int getFlags() {
+    return flags;
+  }
+
+  /**
+   * @see #getFlags()
+   */
+  public void setFlags(int flags) {
+    this.flags = flags;
+  }
+  
+  public void clear() {
+    flags = 0;
+  }
+
+  public boolean equals(Object other) {
+    if (this == other) {
+      return true;
+    }
+    
+    if (other instanceof FlagsAttributeImpl) {
+      return ((FlagsAttributeImpl) other).flags == flags;
+    }
+    
+    return false;
+  }
+
+  public int hashCode() {
+    return flags;
+  }
+  
+  public void copyTo(AttributeImpl target) {
+    FlagsAttribute t = (FlagsAttribute) target;
+    t.setFlags(flags);
+  }
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.java	(working copy)
@@ -17,10 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Serializable;
 
-import org.apache.lucene.util.Attribute;
-
 /**
  * The start and end character offset of a Token. 
  * 
@@ -29,67 +26,23 @@
  * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
-public class OffsetAttribute extends Attribute implements Cloneable, Serializable {
-  private int startOffset;
-  private int endOffset;
-
+public interface OffsetAttribute extends TokenAttribute {
   /** Returns this Token's starting offset, the position of the first character
   corresponding to this token in the source text.
 
   Note that the difference between endOffset() and startOffset() may not be
   equal to termText.length(), as the term text may have been altered by a
   stemmer or some other filter. */
-  public int startOffset() {
-    return startOffset;
-  }
+  public int startOffset();
 
   
   /** Set the starting and ending offset.
     @see #startOffset() and #endOffset()*/
-  public void setOffset(int startOffset, int endOffset) {
-    this.startOffset = startOffset;
-    this.endOffset = endOffset;
-  }
+  public void setOffset(int startOffset, int endOffset);
   
 
   /** Returns this Token's ending offset, one greater than the position of the
   last character corresponding to this token in the source text. The length
   of the token in the source text is (endOffset - startOffset). */
-  public int endOffset() {
-    return endOffset;
-  }
-
-
-  public void clear() {
-    startOffset = 0;
-    endOffset = 0;
-  }
-  
-  public String toString() {
-    return "start=" + startOffset + ",end=" + endOffset;
-  }
-  
-  public boolean equals(Object other) {
-    if (other == this) {
-      return true;
-    }
-    
-    if (other instanceof OffsetAttribute) {
-      OffsetAttribute o = (OffsetAttribute) other;
-      return o.startOffset == startOffset && o.endOffset == endOffset;
-    }
-    
-    return false;
-  }
-
-  public int hashCode() {
-    int code = startOffset;
-    code = code * 31 + endOffset;
-    return code;
-  } 
-  
-  public void copyTo(Attribute target) {
-    OffsetAttribute t = (OffsetAttribute) target;
-    t.setOffset(startOffset, endOffset);
-  }  
+  public int endOffset();
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java	(revision 0)
@@ -0,0 +1,91 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.util.AttributeImpl;
+
+/**
+ * The start and end character offset of a Token. 
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+ */
+public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable, Serializable {
+  private int startOffset;
+  private int endOffset;
+
+  /** Returns this Token's starting offset, the position of the first character
+  corresponding to this token in the source text.
+
+  Note that the difference between endOffset() and startOffset() may not be
+  equal to termText.length(), as the term text may have been altered by a
+  stemmer or some other filter. */
+  public int startOffset() {
+    return startOffset;
+  }
+
+  
+  /** Set the starting and ending offset.
+    @see #startOffset() and #endOffset()*/
+  public void setOffset(int startOffset, int endOffset) {
+    this.startOffset = startOffset;
+    this.endOffset = endOffset;
+  }
+  
+
+  /** Returns this Token's ending offset, one greater than the position of the
+  last character corresponding to this token in the source text. The length
+  of the token in the source text is (endOffset - startOffset). */
+  public int endOffset() {
+    return endOffset;
+  }
+
+
+  public void clear() {
+    startOffset = 0;
+    endOffset = 0;
+  }
+  
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    
+    if (other instanceof OffsetAttributeImpl) {
+      OffsetAttributeImpl o = (OffsetAttributeImpl) other;
+      return o.startOffset == startOffset && o.endOffset == endOffset;
+    }
+    
+    return false;
+  }
+
+  public int hashCode() {
+    int code = startOffset;
+    code = code * 31 + endOffset;
+    return code;
+  } 
+  
+  public void copyTo(AttributeImpl target) {
+    OffsetAttribute t = (OffsetAttribute) target;
+    t.setOffset(startOffset, endOffset);
+  }  
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java	(working copy)
@@ -17,10 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Serializable;
-
 import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Attribute;
 
 /**
  * The payload of a Token. See also {@link Payload}.
@@ -30,80 +27,14 @@
  * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
-public class PayloadAttribute extends Attribute implements Cloneable, Serializable {
-  private Payload payload;  
-  
+public interface PayloadAttribute extends TokenAttribute {
   /**
-   * Initialize this attribute with no payload.
-   */
-  public PayloadAttribute() {}
-  
-  /**
-   * Initialize this attribute with the given payload. 
-   */
-  public PayloadAttribute(Payload payload) {
-    this.payload = payload;
-  }
-  
-  /**
    * Returns this Token's payload.
    */ 
-  public Payload getPayload() {
-    return this.payload;
-  }
+  public Payload getPayload();
 
   /** 
    * Sets this Token's payload.
    */
-  public void setPayload(Payload payload) {
-    this.payload = payload;
-  }
-  
-  public void clear() {
-    payload = null;
-  }
-
-  public String toString() {
-    if (payload == null) {
-      return "payload=null";
-    } 
-    
-    return "payload=" + payload.toString();
-  }
-  
-  public Object clone()  {
-    PayloadAttribute clone = (PayloadAttribute) super.clone();
-    if (payload != null) {
-      clone.payload = (Payload) payload.clone();
-    }
-    return clone;
-  }
-
-  public boolean equals(Object other) {
-    if (other == this) {
-      return true;
-    }
-    
-    if (other instanceof PayloadAttribute) {
-      PayloadAttribute o = (PayloadAttribute) other;
-      if (o.payload == null || payload == null) {
-        return o.payload == null && payload == null;
-      }
-      
-      return o.payload.equals(payload);
-    }
-    
-    return false;
-  }
-
-  public int hashCode() {
-    return (payload == null) ? 0 : payload.hashCode();
-  }
-
-  public void copyTo(Attribute target) {
-    PayloadAttribute t = (PayloadAttribute) target;
-    t.setPayload((payload == null) ? null : (Payload) payload.clone());
-  }  
-
-  
+  public void setPayload(Payload payload);
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java	(revision 0)
@@ -0,0 +1,101 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.util.AttributeImpl;
+
+/**
+ * The payload of a Token. See also {@link Payload}.
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+ */
+public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable, Serializable {
+  private Payload payload;  
+  
+  /**
+   * Initialize this attribute with no payload.
+   */
+  public PayloadAttributeImpl() {}
+  
+  /**
+   * Initialize this attribute with the given payload. 
+   */
+  public PayloadAttributeImpl(Payload payload) {
+    this.payload = payload;
+  }
+  
+  /**
+   * Returns this Token's payload.
+   */ 
+  public Payload getPayload() {
+    return this.payload;
+  }
+
+  /** 
+   * Sets this Token's payload.
+   */
+  public void setPayload(Payload payload) {
+    this.payload = payload;
+  }
+  
+  public void clear() {
+    payload = null;
+  }
+
+  public Object clone()  {
+    PayloadAttributeImpl clone = (PayloadAttributeImpl) super.clone();
+    if (payload != null) {
+      clone.payload = (Payload) payload.clone();
+    }
+    return clone;
+  }
+
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    
+    if (other instanceof PayloadAttributeImpl) {
+      PayloadAttributeImpl o = (PayloadAttributeImpl) other;
+      if (o.payload == null || payload == null) {
+        return o.payload == null && payload == null;
+      }
+      
+      return o.payload.equals(payload);
+    }
+    
+    return false;
+  }
+
+  public int hashCode() {
+    return (payload == null) ? 0 : payload.hashCode();
+  }
+
+  public void copyTo(AttributeImpl target) {
+    PayloadAttribute t = (PayloadAttribute) target;
+    t.setPayload((payload == null) ? null : (Payload) payload.clone());
+  }  
+
+  
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java	(working copy)
@@ -20,7 +20,7 @@
 import java.io.Serializable;
 
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
 
 /** The positionIncrement determines the position of this token
  * relative to the previous Token in a {@link TokenStream}, used in phrase
@@ -53,54 +53,15 @@
  * 
  * @see org.apache.lucene.index.TermPositions
  */
-public class PositionIncrementAttribute extends Attribute implements Cloneable, Serializable {
-  private int positionIncrement = 1;
-  
+public interface PositionIncrementAttribute extends TokenAttribute {
   /** Set the position increment. The default value is one.
    *
    * @param positionIncrement the distance from the prior term
    */
-  public void setPositionIncrement(int positionIncrement) {
-    if (positionIncrement < 0)
-      throw new IllegalArgumentException
-        ("Increment must be zero or greater: " + positionIncrement);
-    this.positionIncrement = positionIncrement;
-  }
+  public void setPositionIncrement(int positionIncrement);
 
   /** Returns the position increment of this Token.
    * @see #setPositionIncrement
    */
-  public int getPositionIncrement() {
-    return positionIncrement;
-  }
-
-  public void clear() {
-    this.positionIncrement = 1;
-  }
-  
-  public String toString() {
-    return "positionIncrement=" + positionIncrement;
-  }
-
-  public boolean equals(Object other) {
-    if (other == this) {
-      return true;
-    }
-    
-    if (other instanceof PositionIncrementAttribute) {
-      return positionIncrement == ((PositionIncrementAttribute) other).positionIncrement;
-    }
- 
-    return false;
-  }
-
-  public int hashCode() {
-    return positionIncrement;
-  }
-  
-  public void copyTo(Attribute target) {
-    PositionIncrementAttribute t = (PositionIncrementAttribute) target;
-    t.setPositionIncrement(positionIncrement);
-  }  
-
+  public int getPositionIncrement();
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java	(revision 0)
@@ -0,0 +1,102 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.util.AttributeImpl;
+
+/** The positionIncrement determines the position of this token
+ * relative to the previous Token in a {@link TokenStream}, used in phrase
+ * searching.
+ *
+ * <p>The default value is one.
+ *
+ * <p>Some common uses for this are:<ul>
+ *
+ * <li>Set it to zero to put multiple terms in the same position.  This is
+ * useful if, e.g., a word has multiple stems.  Searches for phrases
+ * including either stem will match.  In this case, all but the first stem's
+ * increment should be set to zero: the increment of the first instance
+ * should be one.  Repeating a token with an increment of zero can also be
+ * used to boost the scores of matches on that token.
+ *
+ * <li>Set it to values greater than one to inhibit exact phrase matches.
+ * If, for example, one does not want phrases to match across removed stop
+ * words, then one could build a stop word filter that removes stop words and
+ * also sets the increment to the number of stop words removed before each
+ * non-stop word.  Then exact phrase queries will only match when the terms
+ * occur with no intervening stop words.
+ *
+ * </ul>
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+ * 
+ * @see org.apache.lucene.index.TermPositions
+ */
+public class PositionIncrementAttributeImpl extends AttributeImpl implements PositionIncrementAttribute, Cloneable, Serializable {
+  private int positionIncrement = 1;
+  
+  /** Set the position increment. The default value is one.
+   *
+   * @param positionIncrement the distance from the prior term
+   */
+  public void setPositionIncrement(int positionIncrement) {
+    if (positionIncrement < 0)
+      throw new IllegalArgumentException
+        ("Increment must be zero or greater: " + positionIncrement);
+    this.positionIncrement = positionIncrement;
+  }
+
+  /** Returns the position increment of this Token.
+   * @see #setPositionIncrement
+   */
+  public int getPositionIncrement() {
+    return positionIncrement;
+  }
+
+  public void clear() {
+    this.positionIncrement = 1;
+  }
+  
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    
+    if (other instanceof PositionIncrementAttributeImpl) {
+      return positionIncrement == ((PositionIncrementAttributeImpl) other).positionIncrement;
+    }
+ 
+    return false;
+  }
+
+  public int hashCode() {
+    return positionIncrement;
+  }
+  
+  public void copyTo(AttributeImpl target) {
+    PositionIncrementAttribute t = (PositionIncrementAttribute) target;
+    t.setPositionIncrement(positionIncrement);
+  }  
+
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/TermAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TermAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TermAttribute.java	(working copy)
@@ -17,11 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Serializable;
 
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Attribute;
-
 /**
  * The term text of a Token.
  * 
@@ -30,12 +26,7 @@
  * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
-public class TermAttribute extends Attribute implements Cloneable, Serializable {
-  private static int MIN_BUFFER_SIZE = 10;
-  
-  private char[] termBuffer;
-  private int termLength;
-  
+public interface TermAttribute extends TokenAttribute {
   /** Returns the Token's term text.
    * 
    * This method has a performance penalty
@@ -45,38 +36,20 @@
    * String, use this method, which is nothing more than
    * a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
    */
-  public String term() {
-    initTermBuffer();
-    return new String(termBuffer, 0, termLength);
-  }
-
+  public String term();
+  
   /** Copies the contents of buffer, starting at offset for
    *  length characters, into the termBuffer array.
    *  @param buffer the buffer to copy
    *  @param offset the index in the buffer of the first character to copy
    *  @param length the number of characters to copy
    */
-  public void setTermBuffer(char[] buffer, int offset, int length) {
-    char[] newCharBuffer = growTermBuffer(length);
-    if (newCharBuffer != null) {
-      termBuffer = newCharBuffer;
-    }
-    System.arraycopy(buffer, offset, termBuffer, 0, length);
-    termLength = length;
-  }
+  public void setTermBuffer(char[] buffer, int offset, int length);
 
   /** Copies the contents of buffer into the termBuffer array.
    *  @param buffer the buffer to copy
    */
-  public void setTermBuffer(String buffer) {
-    int length = buffer.length();
-    char[] newCharBuffer = growTermBuffer(length);
-    if (newCharBuffer != null) {
-      termBuffer = newCharBuffer;
-    }
-    buffer.getChars(0, length, termBuffer, 0);
-    termLength = length;
-  }
+  public void setTermBuffer(String buffer);
 
   /** Copies the contents of buffer, starting at offset and continuing
    *  for length characters, into the termBuffer array.
@@ -84,17 +57,8 @@
    *  @param offset the index in the buffer of the first character to copy
    *  @param length the number of characters to copy
    */
-  public void setTermBuffer(String buffer, int offset, int length) {
-    assert offset <= buffer.length();
-    assert offset + length <= buffer.length();
-    char[] newCharBuffer = growTermBuffer(length);
-    if (newCharBuffer != null) {
-      termBuffer = newCharBuffer;
-    }
-    buffer.getChars(offset, offset + length, termBuffer, 0);
-    termLength = length;
-  }
-
+  public void setTermBuffer(String buffer, int offset, int length);
+  
   /** Returns the internal termBuffer character array which
    *  you can then directly alter.  If the array is too
    *  small for your token, use {@link
@@ -102,10 +66,7 @@
    *  altering the buffer be sure to call {@link
    *  #setTermLength} to record the number of valid
    *  characters that were placed into the termBuffer. */
-  public char[] termBuffer() {
-    initTermBuffer();
-    return termBuffer;
-  }
+  public char[] termBuffer();
 
   /** Grows the termBuffer to at least size newSize, preserving the
    *  existing content. Note: If the next operation is to change
@@ -117,63 +78,12 @@
    *  @param newSize minimum size of the new termBuffer
    *  @return newly created termBuffer with length >= newSize
    */
-  public char[] resizeTermBuffer(int newSize) {
-    char[] newCharBuffer = growTermBuffer(newSize);
-    if (termBuffer == null) {
-      // If there were termText, then preserve it.
-      // note that if termBuffer is null then newCharBuffer cannot be null
-      assert newCharBuffer != null;
-      termBuffer = newCharBuffer;
-    } else if (newCharBuffer != null) {
-      // Note: if newCharBuffer != null then termBuffer needs to grow.
-      // If there were a termBuffer, then preserve it
-      System.arraycopy(termBuffer, 0, newCharBuffer, 0, termBuffer.length);
-      termBuffer = newCharBuffer;      
-    }
-    return termBuffer;
-  }
+  public char[] resizeTermBuffer(int newSize);
 
-  /** Allocates a buffer char[] of at least newSize
-   *  @param newSize minimum size of the buffer
-   *  @return newly created buffer with length >= newSize or null if the current termBuffer is big enough
-   */
-  private char[] growTermBuffer(int newSize) {
-    if (termBuffer != null) {
-      if (termBuffer.length >= newSize)
-        // Already big enough
-        return null;
-      else
-        // Not big enough; create a new array with slight
-        // over allocation:
-        return new char[ArrayUtil.getNextSize(newSize)];
-    } else {
-
-      // determine the best size
-      // The buffer is always at least MIN_BUFFER_SIZE
-      if (newSize < MIN_BUFFER_SIZE) {
-        newSize = MIN_BUFFER_SIZE;
-      }
-
-      return new char[newSize];
-    }
-  }
-
-  // TODO: once we remove the deprecated termText() method
-  // and switch entirely to char[] termBuffer we don't need
-  // to use this method anymore
-  private void initTermBuffer() {
-    if (termBuffer == null) {
-        termBuffer = new char[MIN_BUFFER_SIZE];
-        termLength = 0;
-    }
-  }
-
   /** Return number of valid characters (length of the term)
    *  in the termBuffer array. */
-  public int termLength() {
-    return termLength;
-  }
-
+  public int termLength();
+  
   /** Set number of valid characters (length of the term) in
    *  the termBuffer array. Use this to truncate the termBuffer
    *  or to synchronize with external manipulation of the termBuffer.
@@ -181,61 +91,5 @@
    *  use {@link #resizeTermBuffer(int)} first.
    *  @param length the truncated length
    */
-  public void setTermLength(int length) {
-    initTermBuffer();
-    if (length > termBuffer.length)
-      throw new IllegalArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.length + ")");
-    termLength = length;
-  }
-
-  public int hashCode() {
-    initTermBuffer();
-    int code = termLength;
-    code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
-    return code;
-  }
-
-  public void clear() {
-    termLength = 0;    
-  }
-
-  public Object clone() {
-    TermAttribute t = (TermAttribute)super.clone();
-    // Do a deep clone
-    if (termBuffer != null) {
-      t.termBuffer = (char[]) termBuffer.clone();
-    }
-    return t;
-  }
-  
-  public boolean equals(Object other) {
-    if (other == this) {
-      return true;
-    }
-    
-    if (other instanceof TermAttribute) {
-      initTermBuffer();
-      TermAttribute o = ((TermAttribute) other);
-      o.initTermBuffer();
-      
-      for(int i=0;i<termLength;i++) {
-        if (termBuffer[i] != o.termBuffer[i]) {
-          return false;
-        }
-      }
-      return true;
-    }
-    
-    return false;
-  }
-
-  public String toString() {
-    initTermBuffer();
-    return "term=" + new String(termBuffer, 0, termLength);
-  }
-  
-  public void copyTo(Attribute target) {
-    TermAttribute t = (TermAttribute) target;
-    t.setTermBuffer(termBuffer, 0, termLength);
-  }
+  public void setTermLength(int length);
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java	(revision 0)
@@ -0,0 +1,241 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.AttributeImpl;
+
+/**
+ * The term text of a Token.
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+ */
+public class TermAttributeImpl extends AttributeImpl implements TermAttribute, Cloneable, Serializable {
+  private static final int MIN_BUFFER_SIZE = 10;
+  
+  private char[] termBuffer;
+  private int termLength;
+  
+  /** Returns the Token's term text.
+   * 
+   * This method has a performance penalty
+   * because the text is stored internally in a char[].  If
+   * possible, use {@link #termBuffer()} and {@link
+   * #termLength()} directly instead.  If you really need a
+   * String, use this method, which is nothing more than
+   * a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+   */
+  public String term() {
+    initTermBuffer();
+    return new String(termBuffer, 0, termLength);
+  }
+
+  /** Copies the contents of buffer, starting at offset for
+   *  length characters, into the termBuffer array.
+   *  @param buffer the buffer to copy
+   *  @param offset the index in the buffer of the first character to copy
+   *  @param length the number of characters to copy
+   */
+  public void setTermBuffer(char[] buffer, int offset, int length) {
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    System.arraycopy(buffer, offset, termBuffer, 0, length);
+    termLength = length;
+  }
+
+  /** Copies the contents of buffer into the termBuffer array.
+   *  @param buffer the buffer to copy
+   */
+  public void setTermBuffer(String buffer) {
+    int length = buffer.length();
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    buffer.getChars(0, length, termBuffer, 0);
+    termLength = length;
+  }
+
+  /** Copies the contents of buffer, starting at offset and continuing
+   *  for length characters, into the termBuffer array.
+   *  @param buffer the buffer to copy
+   *  @param offset the index in the buffer of the first character to copy
+   *  @param length the number of characters to copy
+   */
+  public void setTermBuffer(String buffer, int offset, int length) {
+    assert offset <= buffer.length();
+    assert offset + length <= buffer.length();
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    buffer.getChars(offset, offset + length, termBuffer, 0);
+    termLength = length;
+  }
+
+  /** Returns the internal termBuffer character array which
+   *  you can then directly alter.  If the array is too
+   *  small for your token, use {@link
+   *  #resizeTermBuffer(int)} to increase it.  After
+   *  altering the buffer be sure to call {@link
+   *  #setTermLength} to record the number of valid
+   *  characters that were placed into the termBuffer. */
+  public char[] termBuffer() {
+    initTermBuffer();
+    return termBuffer;
+  }
+
+  /** Grows the termBuffer to at least size newSize, preserving the
+   *  existing content. Note: If the next operation is to change
+   *  the contents of the term buffer use
+   *  {@link #setTermBuffer(char[], int, int)},
+   *  {@link #setTermBuffer(String)}, or
+   *  {@link #setTermBuffer(String, int, int)}
+   *  to optimally combine the resize with the setting of the termBuffer.
+   *  @param newSize minimum size of the new termBuffer
+   *  @return newly created termBuffer with length >= newSize
+   */
+  public char[] resizeTermBuffer(int newSize) {
+    char[] newCharBuffer = growTermBuffer(newSize);
+    if (termBuffer == null) {
+      // If there were termText, then preserve it.
+      // note that if termBuffer is null then newCharBuffer cannot be null
+      assert newCharBuffer != null;
+      termBuffer = newCharBuffer;
+    } else if (newCharBuffer != null) {
+      // Note: if newCharBuffer != null then termBuffer needs to grow.
+      // If there were a termBuffer, then preserve it
+      System.arraycopy(termBuffer, 0, newCharBuffer, 0, termBuffer.length);
+      termBuffer = newCharBuffer;      
+    }
+    return termBuffer;
+  }
+
+  /** Allocates a buffer char[] of at least newSize
+   *  @param newSize minimum size of the buffer
+   *  @return newly created buffer with length >= newSize or null if the current termBuffer is big enough
+   */
+  private char[] growTermBuffer(int newSize) {
+    if (termBuffer != null) {
+      if (termBuffer.length >= newSize)
+        // Already big enough
+        return null;
+      else
+        // Not big enough; create a new array with slight
+        // over allocation:
+        return new char[ArrayUtil.getNextSize(newSize)];
+    } else {
+
+      // determine the best size
+      // The buffer is always at least MIN_BUFFER_SIZE
+      if (newSize < MIN_BUFFER_SIZE) {
+        newSize = MIN_BUFFER_SIZE;
+      }
+
+      return new char[newSize];
+    }
+  }
+
+  // TODO: once we remove the deprecated termText() method
+  // and switch entirely to char[] termBuffer we don't need
+  // to use this method anymore
+  private void initTermBuffer() {
+    if (termBuffer == null) {
+        termBuffer = new char[MIN_BUFFER_SIZE];
+        termLength = 0;
+    }
+  }
+
+  /** Return number of valid characters (length of the term)
+   *  in the termBuffer array. */
+  public int termLength() {
+    return termLength;
+  }
+
+  /** Set number of valid characters (length of the term) in
+   *  the termBuffer array. Use this to truncate the termBuffer
+   *  or to synchronize with external manipulation of the termBuffer.
+   *  Note: to grow the size of the array,
+   *  use {@link #resizeTermBuffer(int)} first.
+   *  @param length the truncated length
+   */
+  public void setTermLength(int length) {
+    initTermBuffer();
+    if (length > termBuffer.length)
+      throw new IllegalArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.length + ")");
+    termLength = length;
+  }
+
+  public int hashCode() {
+    initTermBuffer();
+    int code = termLength;
+    code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
+    return code;
+  }
+
+  public void clear() {
+    termLength = 0;    
+  }
+
+  public Object clone() {
+    TermAttributeImpl t = (TermAttributeImpl)super.clone();
+    // Do a deep clone
+    if (termBuffer != null) {
+      t.termBuffer = (char[]) termBuffer.clone();
+    }
+    return t;
+  }
+  
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    
+    if (other instanceof TermAttributeImpl) {
+      initTermBuffer();
+      TermAttributeImpl o = ((TermAttributeImpl) other);
+      o.initTermBuffer();
+      if (termLength != o.termLength) return false;
+      for(int i=0;i<termLength;i++) {
+        if (termBuffer[i] != o.termBuffer[i]) {
+          return false;
+        }
+      }
+      return true;
+    }
+    
+    return false;
+  }
+
+  public String toString() {
+    initTermBuffer();
+    return "term=" + new String(termBuffer, 0, termLength);
+  }
+  
+  public void copyTo(AttributeImpl target) {
+    TermAttribute t = (TermAttribute) target;
+    t.setTermBuffer(termBuffer, 0, termLength);
+  }
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/TestTeeSinkPerformance.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TestTeeSinkPerformance.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TestTeeSinkPerformance.java	(revision 0)
@@ -0,0 +1,168 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+import java.io.StringReader;
+import java.util.Random;
+
+import org.apache.lucene.analysis.LetterTokenizer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.SinkTokenizer;
+import org.apache.lucene.analysis.SmallToken;
+import org.apache.lucene.analysis.TeeTokenFilter;
+import org.apache.lucene.analysis.Token;
+
+public class TestTeeSinkPerformance {
+  public static void main(String[] args) throws Exception {
+    StringBuffer buffer = new StringBuffer();
+    Random rnd = new Random(0);
+    
+    final int n = 2 * 1024 * 1024;
+    byte[] bytes = new byte[20];
+    
+    for (int i = 0; i < n; i++) {
+      rnd.nextBytes(bytes);
+      buffer.append(new String(bytes));
+    }
+    
+    String doc = buffer.toString();
+
+    test4(doc);
+  }
+  
+  public static void test1(String doc) throws Exception {
+    StringReader reader = new StringReader(doc);
+    
+    SinkTokenizer sink = new SinkTokenizer();
+    TeeTokenFilter ts = new TeeTokenFilter(new LowerCaseTokenizer(reader), sink);
+    int totalLength = 0;
+    int totalLength1 = 0;
+
+    
+    long t1 = System.currentTimeMillis();
+      Token token = new Token();
+      while( (token = ts.next(token)) != null ) {
+        totalLength += (token.termLength());
+      }
+
+      while( (token = sink.next(token)) != null ) {
+        totalLength1 += (token.termLength());
+      }
+      
+      if (totalLength != totalLength1) {
+        System.err.println("Mismatch in length: " + totalLength + " != " + totalLength1);
+      }
+      
+    long t2 = System.currentTimeMillis();
+    
+    System.out.println("Old: Time: " + (t2 - t1) + " ms. totalLength = " + totalLength);
+  }
+  
+  public static void test2(String doc) throws Exception {
+    StringReader reader = new StringReader(doc);
+    SinkTokenizer sink = new SinkTokenizer();
+    TeeTokenFilter ts = new TeeTokenFilter(new LowerCaseTokenizer(reader), sink);
+    int totalLength = 0;
+    int totalLength1 = 0;
+
+    
+    long t1 = System.currentTimeMillis();
+      TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
+      while( ts.incrementToken() ) {
+        totalLength += (termAtt.termLength());
+      }
+  
+      termAtt = (TermAttribute) sink.addAttribute(TermAttribute.class);
+      while( sink.incrementToken() ) {
+        totalLength1 += (termAtt.termLength());
+      }
+      
+      if (totalLength != totalLength1) {
+        System.err.println("Mismatch in length: " + totalLength + " != " + totalLength1);
+      }
+    
+    long t2 = System.currentTimeMillis();
+
+    System.out.println("New: Time: " + (t2 - t1) + " ms. totalLength = " + totalLength);
+  }
+  
+  public static void test3(String doc) throws Exception {
+    StringReader reader = new StringReader(doc);
+    SinkTokenizer sink = new SinkTokenizer();
+    TeeTokenFilter ts = new TeeTokenFilter(new LetterTokenizer(reader) {
+      /** Collects only characters which satisfy
+       * {@link Character#isLetter(char)}.*/
+      protected char normalize(char c) {
+        return Character.toLowerCase(c);
+      }
+      
+      public void initialize() {
+        addAttributeImpl(new Token());
+      }
+
+    }, sink);
+
+    int totalLength = 0;
+    int totalLength1 = 0;
+
+    
+    long t1 = System.currentTimeMillis();
+      TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
+      while( ts.incrementToken() ) {
+        totalLength += (termAtt.termLength());
+      }
+  
+      termAtt = (TermAttribute) sink.addAttribute(TermAttribute.class);
+      while( sink.incrementToken() ) {
+        totalLength1 += (termAtt.termLength());
+      }
+      
+      if (totalLength != totalLength1) {
+        System.err.println("Mismatch in length: " + totalLength + " != " + totalLength1);
+      }
+    
+    long t2 = System.currentTimeMillis();
+
+    System.out.println("New with Token: Time: " + (t2 - t1) + " ms. totalLength = " + totalLength);
+  }
+  
+  public static void test4(String doc) throws Exception {
+    StringReader reader = new StringReader(doc);
+    SinkTokenizer sink = new SinkTokenizer();
+    TeeTokenFilter ts = new TeeTokenFilter(new LetterTokenizer(reader) {
+      /** Collects only characters which satisfy
+       * {@link Character#isLetter(char)}.*/
+      protected char normalize(char c) {
+        return Character.toLowerCase(c);
+      }
+      
+      public void initialize() {
+        addAttributeImpl(new SmallToken());
+      }
+
+    }, sink);
+
+    int totalLength = 0;
+    int totalLength1 = 0;
+
+    
+    long t1 = System.currentTimeMillis();
+      TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
+      while( ts.incrementToken() ) {
+        totalLength += (termAtt.termLength());
+      }
+  
+      termAtt = (TermAttribute) sink.addAttribute(TermAttribute.class);
+      while( sink.incrementToken() ) {
+        totalLength1 += (termAtt.termLength());
+      }
+      
+      if (totalLength != totalLength1) {
+        System.err.println("Mismatch in length: " + totalLength + " != " + totalLength1);
+      }
+    
+    long t2 = System.currentTimeMillis();
+
+    System.out.println("New with SmallToken: Time: " + (t2 - t1) + " ms. totalLength = " + totalLength);
+
+    
+  }
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/TokenAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TokenAttribute.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TokenAttribute.java	(revision 0)
@@ -0,0 +1,9 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+import java.io.Serializable;
+
+import org.apache.lucene.util.Attribute;
+
+public interface TokenAttribute extends Attribute, Cloneable, Serializable {
+  public void clear();
+}
Index: src/java/org/apache/lucene/analysis/tokenattributes/TypeAttribute.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TypeAttribute.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TypeAttribute.java	(working copy)
@@ -17,10 +17,7 @@
  * limitations under the License.
  */
 
-import java.io.Serializable;
 
-import org.apache.lucene.util.Attribute;
-
 /**
  * A Token's lexical type. The Default value is "word". 
  * 
@@ -29,55 +26,11 @@
  * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
-public class TypeAttribute extends Attribute implements Cloneable, Serializable {
-  private String type;
-  public static final String DEFAULT_TYPE = "word";
-  
-  public TypeAttribute() {
-    this(DEFAULT_TYPE); 
-  }
-  
-  public TypeAttribute(String type) {
-    this.type = type;
-  }
-  
+public interface TypeAttribute extends TokenAttribute {
   /** Returns this Token's lexical type.  Defaults to "word". */
-  public String type() {
-    return type;
-  }
+  public String type();
 
   /** Set the lexical type.
       @see #type() */
-  public void setType(String type) {
-    this.type = type;
-  }
-
-  public void clear() {
-    type = DEFAULT_TYPE;    
-  }
-
-  public String toString() {
-    return "type=" + type;
-  }
-
-  public boolean equals(Object other) {
-    if (other == this) {
-      return true;
-    }
-    
-    if (other instanceof TypeAttribute) {
-      return type.equals(((TypeAttribute) other).type);
-    }
-    
-    return false;
-  }
-
-  public int hashCode() {
-    return type.hashCode();
-  }
-  
-  public void copyTo(Attribute target) {
-    TypeAttribute t = (TypeAttribute) target;
-    t.setType(new String(type));
-  }
+  public void setType(String type);
 }
Index: src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java	(revision 0)
@@ -0,0 +1,79 @@
+package org.apache.lucene.analysis.tokenattributes;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Serializable;
+
+import org.apache.lucene.util.AttributeImpl;
+
+/**
+ * A Token's lexical type. The Default value is "word". 
+ * 
+ * <p><font color="#FF0000">
+ * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
+ * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
+ * We will make our best efforts to keep the APIs backwards-compatible.</font>
+ */
+public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable, Serializable {
+  private String type;
+  public static final String DEFAULT_TYPE = "word";
+  
+  public TypeAttributeImpl() {
+    this(DEFAULT_TYPE); 
+  }
+  
+  public TypeAttributeImpl(String type) {
+    this.type = type;
+  }
+  
+  /** Returns this Token's lexical type.  Defaults to "word". */
+  public String type() {
+    return type;
+  }
+
+  /** Set the lexical type.
+      @see #type() */
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public void clear() {
+    type = DEFAULT_TYPE;    
+  }
+
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    
+    if (other instanceof TypeAttributeImpl) {
+      return type.equals(((TypeAttributeImpl) other).type);
+    }
+    
+    return false;
+  }
+
+  public int hashCode() {
+    return type.hashCode();
+  }
+  
+  public void copyTo(AttributeImpl target) {
+    TypeAttribute t = (TypeAttribute) target;
+    t.setType(type);
+  }
+}
Index: src/java/org/apache/lucene/analysis/CachingTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/CachingTokenFilter.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/CachingTokenFilter.java	(working copy)
@@ -55,8 +55,8 @@
       return false;
     }
     // Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
-    AttributeSource state = (AttributeSource) iterator.next();
-    state.restoreState(this);
+    AttributeSource.State state = (AttributeSource.State) iterator.next();
+    restoreState(state);
     return true;
   }
   
Index: src/java/org/apache/lucene/analysis/SinkTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/SinkTokenizer.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/SinkTokenizer.java	(working copy)
@@ -71,15 +71,16 @@
     if (iter == null) iter = lst.iterator();
     // Since this TokenStream can be reset we have to maintain the tokens as immutable
     if (iter.hasNext()) {
-      AttributeSource state = (AttributeSource) iter.next();
-      state.restoreState(this);
+      AttributeSource.State state = (AttributeSource.State) iter.next();
+      this.restoreState(state);
       return true;
     }
     return false;
   }
 
   public void add(AttributeSource source) throws IOException {
-    lst.add(source); 
+    AttributeSource.State state = source.captureState();
+    lst.add(state); 
   }
   
   /**
Index: src/java/org/apache/lucene/analysis/SmallToken.java
===================================================================
--- src/java/org/apache/lucene/analysis/SmallToken.java	(revision 0)
+++ src/java/org/apache/lucene/analysis/SmallToken.java	(revision 0)
@@ -0,0 +1,496 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.AttributeImpl;
+
+/** 
+  This class is now deprecated and a new TokenStream API was introduced with Lucene 2.9.
+  See Javadocs in {@link TokenStream} for further details.
+  <p> 
+  A Token is an occurrence of a term from the text of a field.  It consists of
+  a term's text, the start and end offset of the term in the text of the field,
+  and a type string.
+  <p>
+  The start and end offsets permit applications to re-associate a token with
+  its source text, e.g., to display highlighted query terms in a document
+  browser, or to show matching text fragments in a KWIC (KeyWord In Context)
+  display, etc.
+  <p>
+  The type is a string, assigned by a lexical analyzer
+  (a.k.a. tokenizer), naming the lexical or syntactic class that the token
+  belongs to.  For example an end of sentence marker token might be implemented
+  with type "eos".  The default token type is "word".  
+  <p>
+  A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
+  length byte array. Use {@link TermPositions#getPayloadLength()} and 
+  {@link TermPositions#getPayload(byte[], int)} to retrieve the payloads from the index.
+  
+  <br><br>
+  <p><font color="#FF0000">
+  WARNING: The status of the <b>Payloads</b> feature is experimental. 
+  The APIs introduced here might change in the future and will not be 
+  supported anymore in such a case.</font>
+
+  <br><br>
+
+  <p><b>NOTE:</b> As of 2.3, Token stores the term text
+  internally as a malleable char[] termBuffer instead of
+  String termText.  The indexing code and core tokenizers
+  have been changed to re-use a single Token instance, changing
+  its buffer and other fields in-place as the Token is
+  processed.  This provides substantially better indexing
+  performance as it saves the GC cost of new'ing a Token and
+  String for every term.  The APIs that accept String
+  termText are still available but a warning about the
+  associated performance cost has been added (below).  The
+  {@link #termText()} method has been deprecated.</p>
+  
+  <p>Tokenizers and filters should try to re-use a Token
+  instance when possible for best performance, by
+  implementing the {@link TokenStream#next(Token)} API.
+  Failing that, to create a new Token you should first use
+  one of the constructors that starts with null text.  To load
+  the token from a char[] use {@link #setTermBuffer(char[], int, int)}.
+  To load from a String use {@link #setTermBuffer(String)} or {@link #setTermBuffer(String, int, int)}.
+  Alternatively you can get the Token's termBuffer by calling either {@link #termBuffer()},
+  if you know that your text is shorter than the capacity of the termBuffer
+  or {@link #resizeTermBuffer(int)}, if there is any possibility
+  that you may need to grow the buffer. Fill in the characters of your term into this
+  buffer, with {@link String#getChars(int, int, char[], int)} if loading from a string,
+  or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #setTermLength(int)} to
+  set the length of the term text.  See <a target="_top"
+  href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
+  for details.</p>
+  <p>Typical reuse patterns:
+  <ul>
+  <li> Copying text from a string (type is reset to #DEFAULT_TYPE if not specified):<br/>
+  <pre>
+    return reusableToken.reinit(string, startOffset, endOffset[, type]);
+  </pre>
+  </li>
+  <li> Copying some text from a string (type is reset to #DEFAULT_TYPE if not specified):<br/>
+  <pre>
+    return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
+  </pre>
+  </li>
+  
+  <li> Copying text from char[] buffer (type is reset to #DEFAULT_TYPE if not specified):<br/>
+  <pre>
+    return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
+  </pre>
+  </li>
+  <li> Copying some text from a char[] buffer (type is reset to #DEFAULT_TYPE if not specified):<br/>
+  <pre>
+    return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
+  </pre>
+  </li>
+  <li> Copying from one Token to another (type is reset to #DEFAULT_TYPE if not specified):<br/>
+  <pre>
+    return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
+  </pre>
+  </li>
+  </ul>
+  A few things to note:
+  <ul>
+  <li>clear() initializes most of the fields to default values, but not startOffset, endOffset and type.</li>
+  <li>Because <code>TokenStreams</code> can be chained, one cannot assume that the <code>Token's</code> current type is correct.</li>
+  <li>The startOffset and endOffset represent the start and offset in the source text. So be careful in adjusting them.</li>
+  <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
+  </ul>
+  </p>
+
+  @see org.apache.lucene.index.Payload
+*/
+public class SmallToken extends AttributeImpl implements Cloneable, TermAttribute, PositionIncrementAttribute,
+                                      OffsetAttribute {
+
+  public static final String DEFAULT_TYPE = "word";
+
+  private static final int MIN_BUFFER_SIZE = 10;
+
+  /**
+   * Characters for the term text.
+   * @deprecated This will be made private. Instead, use:
+   * {@link #termBuffer()},
+   * {@link #setTermBuffer(char[], int, int)},
+   * {@link #setTermBuffer(String)}, or
+   * {@link #setTermBuffer(String, int, int)}
+   */
+  char[] termBuffer;
+
+  /**
+   * Length of term text in the buffer.
+   * @deprecated This will be made private. Instead, use:
+   * {@link #termLength()}, or {@link #setTermLength(int)}.
+   */
+  int termLength;
+
+  /**
+   * Start in source text.
+   * @deprecated This will be made private. Instead, use:
+   * {@link #startOffset()}, or {@link #setStartOffset(int)}.
+   */
+  int startOffset;
+
+  /**
+   * End in source text.
+   * @deprecated This will be made private. Instead, use:
+   * {@link #endOffset()}, or {@link #setEndOffset(int)}.
+   */
+  int endOffset;
+
+  /**
+   * The lexical type of the token.
+   * @deprecated This will be made private. Instead, use:
+   * {@link #type()}, or {@link #setType(String)}.
+   */
+  
+
+
+  
+  /**
+   * @deprecated This will be made private. Instead, use:
+   * {@link #getPositionIncrement()}, or {@link #setPositionIncrement(int)}.
+   */
+  int positionIncrement = 1;
+
+  /** Constructs a Token will null text. */
+  public SmallToken() {
+  }
+  
+
+  /** Set the position increment.  This determines the position of this token
+   * relative to the previous Token in a {@link TokenStream}, used in phrase
+   * searching.
+   *
+   * <p>The default value is one.
+   *
+   * <p>Some common uses for this are:<ul>
+   *
+   * <li>Set it to zero to put multiple terms in the same position.  This is
+   * useful if, e.g., a word has multiple stems.  Searches for phrases
+   * including either stem will match.  In this case, all but the first stem's
+   * increment should be set to zero: the increment of the first instance
+   * should be one.  Repeating a token with an increment of zero can also be
+   * used to boost the scores of matches on that token.
+   *
+   * <li>Set it to values greater than one to inhibit exact phrase matches.
+   * If, for example, one does not want phrases to match across removed stop
+   * words, then one could build a stop word filter that removes stop words and
+   * also sets the increment to the number of stop words removed before each
+   * non-stop word.  Then exact phrase queries will only match when the terms
+   * occur with no intervening stop words.
+   *
+   * </ul>
+   * @param positionIncrement the distance from the prior term
+   * @see org.apache.lucene.index.TermPositions
+   */
+  public void setPositionIncrement(int positionIncrement) {
+    if (positionIncrement < 0)
+      throw new IllegalArgumentException
+        ("Increment must be zero or greater: " + positionIncrement);
+    this.positionIncrement = positionIncrement;
+  }
+
+  /** Returns the position increment of this Token.
+   * @see #setPositionIncrement
+   */
+  public int getPositionIncrement() {
+    return positionIncrement;
+  }
+
+  /** Returns the Token's term text.
+   * 
+   * @deprecated This method now has a performance penalty
+   * because the text is stored internally in a char[].  If
+   * possible, use {@link #termBuffer()} and {@link
+   * #termLength()} directly instead.  If you really need a
+   * String, use {@link #term()}
+   */
+  public final String termText() {
+    if (termBuffer != null)
+      return new String(termBuffer, 0, termLength);
+    else
+      return null;
+  }
+
+  /** Copies the contents of buffer, starting at offset for
+   *  length characters, into the termBuffer array.
+   *  @param buffer the buffer to copy
+   *  @param offset the index in the buffer of the first character to copy
+   *  @param length the number of characters to copy
+   */
+  public final void setTermBuffer(char[] buffer, int offset, int length) {
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    System.arraycopy(buffer, offset, termBuffer, 0, length);
+    termLength = length;
+  }
+
+  /** Copies the contents of buffer into the termBuffer array.
+   *  @param buffer the buffer to copy
+   */
+  public final void setTermBuffer(String buffer) {
+    int length = buffer.length();
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    buffer.getChars(0, length, termBuffer, 0);
+    termLength = length;
+  }
+
+  /** Copies the contents of buffer, starting at offset and continuing
+   *  for length characters, into the termBuffer array.
+   *  @param buffer the buffer to copy
+   *  @param offset the index in the buffer of the first character to copy
+   *  @param length the number of characters to copy
+   */
+  public final void setTermBuffer(String buffer, int offset, int length) {
+    assert offset <= buffer.length();
+    assert offset + length <= buffer.length();
+    char[] newCharBuffer = growTermBuffer(length);
+    if (newCharBuffer != null) {
+      termBuffer = newCharBuffer;
+    }
+    buffer.getChars(offset, offset + length, termBuffer, 0);
+    termLength = length;
+  }
+
+  /** Returns the internal termBuffer character array which
+   *  you can then directly alter.  If the array is too
+   *  small for your token, use {@link
+   *  #resizeTermBuffer(int)} to increase it.  After
+   *  altering the buffer be sure to call {@link
+   *  #setTermLength} to record the number of valid
+   *  characters that were placed into the termBuffer. */
+  public final char[] termBuffer() {
+    initTermBuffer();
+    return termBuffer;
+  }
+
+  /** Grows the termBuffer to at least size newSize, preserving the
+   *  existing content. Note: If the next operation is to change
+   *  the contents of the term buffer use
+   *  {@link #setTermBuffer(char[], int, int)},
+   *  {@link #setTermBuffer(String)}, or
+   *  {@link #setTermBuffer(String, int, int)}
+   *  to optimally combine the resize with the setting of the termBuffer.
+   *  @param newSize minimum size of the new termBuffer
+   *  @return newly created termBuffer with length >= newSize
+   */
+  public char[] resizeTermBuffer(int newSize) {
+    char[] newCharBuffer = growTermBuffer(newSize);
+    if (termBuffer == null) {
+      // If there was termText, then preserve it.
+      // note that if termBuffer is null then newCharBuffer cannot be null
+      assert newCharBuffer != null;
+      termBuffer = newCharBuffer;
+    } else if (newCharBuffer != null) {
+      // Note: if newCharBuffer != null then termBuffer needs to grow.
+      // If there was a termBuffer, then preserve it.
+      System.arraycopy(termBuffer, 0, newCharBuffer, 0, termBuffer.length);
+      termBuffer = newCharBuffer;      
+    }
+    return termBuffer;
+  }
+
+  /** Allocates a buffer char[] of at least newSize
+   *  @param newSize minimum size of the buffer
+   *  @return newly created buffer with length >= newSize or null if the current termBuffer is big enough
+   */
+  private char[] growTermBuffer(int newSize) {
+    if (termBuffer != null) {
+      if (termBuffer.length >= newSize)
+        // Already big enough
+        return null;
+      else
+        // Not big enough; create a new array with slight
+        // over allocation:
+        return new char[ArrayUtil.getNextSize(newSize)];
+    } else {
+
+      // determine the best size
+      // The buffer is always at least MIN_BUFFER_SIZE
+      if (newSize < MIN_BUFFER_SIZE) {
+        newSize = MIN_BUFFER_SIZE;
+      }
+
+      return new char[newSize];
+    }
+  }
+
+  // TODO: once we remove the deprecated termText() method
+  // and switch entirely to char[] termBuffer we don't need
+  // to use this method anymore
+  public void initTermBuffer() {
+    if (termBuffer == null) {
+        termBuffer = new char[MIN_BUFFER_SIZE];
+        termLength = 0;
+    }
+  }
+
+  /** Return number of valid characters (length of the term)
+   *  in the termBuffer array. */
+  public final int termLength() {
+    initTermBuffer();
+    return termLength;
+  }
+
+  /** Set number of valid characters (length of the term) in
+   *  the termBuffer array. Use this to truncate the termBuffer
+   *  or to synchronize with external manipulation of the termBuffer.
+   *  Note: to grow the size of the array,
+   *  use {@link #resizeTermBuffer(int)} first.
+   *  @param length the truncated length
+   */
+  public final void setTermLength(int length) {
+    initTermBuffer();
+    if (length > termBuffer.length)
+      throw new IllegalArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.length + ")");
+    termLength = length;
+  }
+
+  /** Returns this Token's starting offset, the position of the first character
+    corresponding to this token in the source text.
+
+    Note that the difference between endOffset() and startOffset() may not be
+    equal to termText.length(), as the term text may have been altered by a
+    stemmer or some other filter. */
+  public final int startOffset() {
+    return startOffset;
+  }
+
+  /** Set the starting offset.
+      @see #startOffset() */
+  public void setStartOffset(int offset) {
+    this.startOffset = offset;
+  }
+
+  /** Returns this Token's ending offset, one greater than the position of the
+    last character corresponding to this token in the source text. The length
+    of the token in the source text is (endOffset - startOffset). */
+  public final int endOffset() {
+    return endOffset;
+  }
+
+  /** Set the ending offset.
+      @see #endOffset() */
+  public void setEndOffset(int offset) {
+    this.endOffset = offset;
+  }
+
+  public Object clone() {
+      SmallToken t = (SmallToken)super.clone();
+      // Do a deep clone
+      if (termBuffer != null) {
+        t.termBuffer = (char[]) termBuffer.clone();
+      }
+      return t;
+  }
+
+  private boolean subEqual(Object o1, Object o2) {
+    if (o1 == null)
+      return o2 == null;
+    else
+      return o1.equals(o2);
+  }
+  
+  public void clear() {
+    // Leave termBuffer to allow re-use
+    termLength = 0;
+    positionIncrement = 1;
+    // startOffset = endOffset = 0;
+    // type = DEFAULT_TYPE;
+  }
+
+
+  /**
+   * Copy the prototype token's fields into this one. Note: Payloads are shared.
+   * @param prototype the token whose field values are copied into this one (the payload is shared, not cloned)
+   */
+  public void reinit(SmallToken prototype) {
+    prototype.initTermBuffer();
+    setTermBuffer(prototype.termBuffer, 0, prototype.termLength);
+    positionIncrement = prototype.positionIncrement;
+    startOffset = prototype.startOffset;
+    endOffset = prototype.endOffset;
+  }
+
+  public void copyTo(AttributeImpl target) {
+    SmallToken to = (SmallToken) target;
+    to.reinit(this);
+  }
+
+
+  public boolean equals(Object obj) {
+    if (obj == this)
+      return true;
+
+    if (obj instanceof SmallToken) {
+      SmallToken other = (SmallToken) obj;
+
+      initTermBuffer();
+      other.initTermBuffer();
+      
+      if (termLength == other.termLength &&
+          startOffset == other.startOffset &&
+          endOffset == other.endOffset && 
+          positionIncrement == other.positionIncrement) {
+        for(int i=0;i<termLength;i++)
+          if (termBuffer[i] != other.termBuffer[i])
+            return false;
+        return true;
+      } else
+        return false;
+    } else
+      return false;
+  }
+
+
+  public int hashCode() {
+    initTermBuffer();
+    int code = termLength;
+    code = code * 31 + startOffset;
+    code = code * 31 + endOffset;
+    code = code * 31 + positionIncrement;
+    code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
+    return code;
+  }
+
+
+  public void setOffset(int startOffset, int endOffset) {
+    this.startOffset = startOffset;
+    this.endOffset = endOffset;
+  }
+
+
+  public String term() {
+    return termText();
+  }
+}
Index: src/java/org/apache/lucene/analysis/TeeTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TeeTokenFilter.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/TeeTokenFilter.java	(working copy)
@@ -20,7 +20,10 @@
 import java.io.IOException;
 import java.util.Iterator;
 
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
 
+
 /**
  * Works in conjunction with the SinkTokenizer to provide the ability to set aside tokens
  * that have already been analyzed.  This is useful in situations where multiple fields share
@@ -63,13 +66,13 @@
     this.sink = sink;
     Iterator it = getAttributesIterator();
     while (it.hasNext()) {
-      sink.addAttribute(it.next().getClass());
+      sink.addAttributeImpl((AttributeImpl) it.next());
     }
   }
   
   public boolean incrementToken() throws IOException {
     if (input.incrementToken()) {
-      sink.add(captureState());
+      sink.add(this);
       return true;
     }
     return false;
Index: src/java/org/apache/lucene/analysis/Token.java
===================================================================
--- src/java/org/apache/lucene/analysis/Token.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/Token.java	(working copy)
@@ -17,9 +17,16 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.index.TermPositions;     // for javadoc
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.AttributeImpl;
 
 /** 
   This class is now deprecated and a new TokenStream API was introduced with Lucene 2.9.
@@ -118,10 +125,10 @@
   </p>
 
   @see org.apache.lucene.index.Payload
-  @deprecated A new TokenStream API was introduced with Lucene 2.9.
-              See javadocs in {@link TokenStream} for further details.
 */
-public class Token implements Cloneable {
+public class Token extends AttributeImpl 
+                   implements Cloneable, TermAttribute, TypeAttribute, PositionIncrementAttribute,
+                              FlagsAttribute, OffsetAttribute, PayloadAttribute {
 
   public static final String DEFAULT_TYPE = "word";
 
@@ -561,6 +568,13 @@
   public void setEndOffset(int offset) {
     this.endOffset = offset;
   }
+  
+  /** Set the starting and ending offset.
+  See {@link #startOffset()} and {@link #endOffset()}. */
+  public void setOffset(int startOffset, int endOffset) {
+    this.startOffset = startOffset;
+    this.endOffset = endOffset;
+  }
 
   /** Returns this Token's lexical type.  Defaults to "word". */
   public final String type() {
@@ -640,19 +654,15 @@
   }
 
   public Object clone() {
-    try {
-      Token t = (Token)super.clone();
-      // Do a deep clone
-      if (termBuffer != null) {
-        t.termBuffer = (char[]) termBuffer.clone();
-      }
-      if (payload != null) {
-        t.setPayload((Payload) payload.clone());
-      }
-      return t;
-    } catch (CloneNotSupportedException e) {
-      throw new RuntimeException(e);  // shouldn't happen
+    Token t = (Token)super.clone();
+    // Do a deep clone
+    if (termBuffer != null) {
+      t.termBuffer = (char[]) termBuffer.clone();
     }
+    if (payload != null) {
+      t.setPayload((Payload) payload.clone());
+    }
+    return t;
   }
 
   /** Makes a clone, but replaces the term buffer &
@@ -862,4 +872,9 @@
     type = prototype.type;
     payload =  prototype.payload;
   }
+
+  public void copyTo(AttributeImpl target) {
+    Token to = (Token) target;
+    to.reinit(this);
+  }
 }
Index: src/java/org/apache/lucene/analysis/TokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenFilter.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/TokenFilter.java	(working copy)
@@ -53,17 +53,4 @@
     super.reset();
     input.reset();
   }
-  
-  public boolean useNewAPI() {
-    return input.useNewAPI();
-  }
-
-  /**
-   * Sets whether or not to use the new TokenStream API. Settings this
-   * will apply to this Filter and all TokenStream/Filters upstream.
-   */
-  public void setUseNewAPI(boolean use) {
-    input.setUseNewAPI(use);
-  }
-
 }
Index: src/java/org/apache/lucene/analysis/TokenStream.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenStream.java	(revision 784591)
+++ src/java/org/apache/lucene/analysis/TokenStream.java	(working copy)
@@ -21,7 +21,7 @@
 import java.util.Iterator;
 
 import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeSource;
 
 /** A TokenStream enumerates the sequence of tokens, either from
@@ -36,13 +36,13 @@
   </ul>
   A new TokenStream API is introduced with Lucene 2.9. Since
   2.9 Token is deprecated and the preferred way to store
-  the information of a token is to use {@link Attribute}s.
+  the information of a token is to use {@link AttributeImpl}s.
   <p>
   For that reason TokenStream extends {@link AttributeSource}
-  now. Note that only one instance per {@link Attribute} is
+  now. Note that only one instance per {@link AttributeImpl} is
   created and reused for every token. This approach reduces
   object creations and allows local caching of references to
-  the {@link Attribute}s. See {@link #incrementToken()} for further details.
+  the {@link AttributeImpl}s. See {@link #incrementToken()} for further details.
   <p>
   <b>The workflow of the new TokenStream API is as follows:</b>
   <ol>
@@ -79,10 +79,7 @@
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
   */
 
-public abstract class TokenStream extends AttributeSource {
-  private static boolean useNewAPIDefault = false;
-  private boolean useNewAPI = useNewAPIDefault;
-  
+public abstract class TokenStream extends AttributeSource {  
   protected TokenStream() {
     super();
   }
@@ -90,68 +87,17 @@
   protected TokenStream(AttributeSource input) {
     super(input);
   }
-
-  /**
-   * Returns whether or not the new TokenStream APIs are used
-   * by default. 
-   * (see {@link #incrementToken()}, {@link AttributeSource}).
-   */
-  public static boolean useNewAPIDefault() {
-    return useNewAPIDefault;
-  }
-
-  /**
-   * Use this API to enable or disable the new TokenStream API.
-   * by default. Can be overridden by calling {@link #setUseNewAPI(boolean)}. 
-   * (see {@link #incrementToken()}, {@link AttributeSource}).
-   * <p>
-   * If set to true, the indexer will call {@link #incrementToken()} 
-   * to consume Tokens from this stream.
-   * <p>
-   * If set to false, the indexer will call {@link #next(Token)}
-   * instead. 
-   */
-  public static void setUseNewAPIDefault(boolean use) {
-    useNewAPIDefault = use;
-  }
-  
-  /**
-   * Returns whether or not the new TokenStream APIs are used 
-   * for this stream.
-   * (see {@link #incrementToken()}, {@link AttributeSource}).
-   */
-  public boolean useNewAPI() {
-    return useNewAPI;
-  }
-
-  /**
-   * Use this API to enable or disable the new TokenStream API
-   * for this stream. Overrides {@link #setUseNewAPIDefault(boolean)}.
-   * (see {@link #incrementToken()}, {@link AttributeSource}).
-   * <p>
-   * If set to true, the indexer will call {@link #incrementToken()} 
-   * to consume Tokens from this stream.
-   * <p>
-   * If set to false, the indexer will call {@link #next(Token)}
-   * instead. 
-   * <p>
-   * <b>NOTE: All streams and filters in one chain must use the
-   * same API. </b>
-   */
-  public void setUseNewAPI(boolean use) {
-    useNewAPI = use;
-  }
     	
 	/**
 	 * Consumers (e. g. the indexer) use this method to advance the stream 
 	 * to the next token. Implementing classes must implement this method 
-	 * and update the appropriate {@link Attribute}s with content of the 
+	 * and update the appropriate {@link AttributeImpl}s with content of the 
 	 * next token.
 	 * <p>
 	 * This method is called for every token of a document, so an efficient
 	 * implementation is crucial for good performance. To avoid calls to 
 	 * {@link #addAttribute(Class)} and {@link #getAttribute(Class)} and
-	 * downcasts, references to all {@link Attribute}s that this stream uses 
+	 * downcasts, references to all {@link AttributeImpl}s that this stream uses 
 	 * should be retrieved during instantiation.   
 	 * <p>
 	 * To make sure that filters and consumers know which attributes are available
@@ -165,7 +111,7 @@
 	 */
 	public boolean incrementToken() throws IOException {
 	  // subclasses must implement this method; will be made abstract in Lucene 3.0
-	  return false;
+	  throw new UnsupportedTokenStreamAPIException("This stream or filter does not support the new Lucene 2.9 TokenStream API yet.");
 	}
 	
   /** Returns the next token in the stream, or null at EOS.
@@ -259,5 +205,11 @@
     sb.append(')');
     return sb.toString();
   }
+  
+  public static final class UnsupportedTokenStreamAPIException extends UnsupportedOperationException {
+    private UnsupportedTokenStreamAPIException(String message) {
+      super(message);
+    }
+  }
 
 }
Index: src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerField.java	(revision 784591)
+++ src/java/org/apache/lucene/index/DocInverterPerField.java	(working copy)
@@ -22,6 +22,7 @@
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.TokenStream.UnsupportedTokenStreamAPIException;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 
@@ -132,21 +133,34 @@
           try {
             int offsetEnd = fieldState.offset-1;
             
-            boolean useNewTokenStreamAPI = stream.useNewAPI();
             Token localToken = null;
+            Token token = null;
             
+            boolean useNewTokenStreamAPI = true;
+            boolean hasMoreTokens = false;
+            try {
+              hasMoreTokens = stream.incrementToken();
+            } catch (IOException e) {
+              // ignore
+            } catch (UnsupportedTokenStreamAPIException e) {
+              // fall back to old API
+              useNewTokenStreamAPI = false;
+            }
+
             if (useNewTokenStreamAPI) {
               fieldState.attributeSource = stream;
             } else {              
               fieldState.attributeSource = perThread.localTokenStream;
               localToken = perThread.localToken;
+              token = stream.next(localToken);
+              hasMoreTokens = (token != null);              
             }         
-            
-            consumer.start(field);
 
             OffsetAttribute offsetAttribute = (OffsetAttribute) fieldState.attributeSource.addAttribute(OffsetAttribute.class);
             PositionIncrementAttribute posIncrAttribute = (PositionIncrementAttribute) fieldState.attributeSource.addAttribute(PositionIncrementAttribute.class);
             
+            consumer.start(field);
+            
             for(;;) {
 
               // If we hit an exception in stream.next below
@@ -155,15 +169,9 @@
               // non-aborting and (above) this one document
               // will be marked as deleted, but still
               // consume a docID
-              Token token = null;
-              if (useNewTokenStreamAPI) {
-                if (!stream.incrementToken()) break;
-              } else {
-                token = stream.next(localToken);
-                if (token == null) break;
-                perThread.localTokenStream.set(token);
-              }
               
+              if (!hasMoreTokens) break;
+              
               final int posIncr = posIncrAttribute.getPositionIncrement();
               fieldState.position += posIncr;
               if (allowMinus1Position || fieldState.position > 0) {
@@ -194,6 +202,18 @@
                   docState.infoStream.println("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens");
                 break;
               }
+
+              if (useNewTokenStreamAPI) {
+                hasMoreTokens = stream.incrementToken();
+              } else {
+                token = stream.next(localToken);
+                if (token != null) {
+                  hasMoreTokens = true;
+                  perThread.localTokenStream.set(token);
+                } else {
+                  hasMoreTokens = false;
+                }
+              }
             }
             fieldState.offset = offsetEnd+1;
           } finally {
Index: src/java/org/apache/lucene/index/DocInverterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerThread.java	(revision 784591)
+++ src/java/org/apache/lucene/index/DocInverterPerThread.java	(working copy)
@@ -22,12 +22,17 @@
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
+import org.apache.lucene.analysis.tokenattributes.FlagsAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Attribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttributeImpl;
 
 /** This is a DocFieldConsumer that inverts each field,
  *  separately, from a Document, and accepts a
@@ -64,7 +69,7 @@
   static class BackwardsCompatibilityStream extends TokenStream {
     private Token token;
       
-    TermAttribute termAttribute = new TermAttribute() {
+    TermAttribute termAttribute = new TermAttributeImpl() {
       public String term() {
         return token.term();
       }
@@ -77,7 +82,7 @@
         return token.termLength();
       }
     };
-    OffsetAttribute offsetAttribute = new OffsetAttribute() {
+    OffsetAttribute offsetAttribute = new OffsetAttributeImpl() {
       public int startOffset() {
         return token.startOffset();
       }
@@ -87,25 +92,25 @@
       }
     };
     
-    PositionIncrementAttribute positionIncrementAttribute = new PositionIncrementAttribute() {
+    PositionIncrementAttribute positionIncrementAttribute = new PositionIncrementAttributeImpl() {
       public int getPositionIncrement() {
         return token.getPositionIncrement();
       }
     };
     
-    FlagsAttribute flagsAttribute = new FlagsAttribute() {
+    FlagsAttribute flagsAttribute = new FlagsAttributeImpl() {
       public int getFlags() {
         return token.getFlags();
       }
     };
     
-    PayloadAttribute payloadAttribute = new PayloadAttribute() {
+    PayloadAttribute payloadAttribute = new PayloadAttributeImpl() {
       public Payload getPayload() {
         return token.getPayload();
       }
     };
     
-    TypeAttribute typeAttribute = new TypeAttribute() {
+    TypeAttribute typeAttribute = new TypeAttributeImpl() {
       public String type() {
         return token.type();
       }
Index: src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.java	(revision 784591)
+++ src/java/org/apache/lucene/queryParser/QueryParser.java	(working copy)
@@ -17,6 +17,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.TokenStream.UnsupportedTokenStreamAPIException;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.DateField;
@@ -529,36 +530,40 @@
     org.apache.lucene.analysis.Token reusableToken = null;
     org.apache.lucene.analysis.Token nextToken = null;
 
-
-    boolean useNewAPI = TokenStream.useNewAPIDefault();
-
-    if (useNewAPI) {
-      boolean success = false;
-      try {
-        buffer.reset();
-        success = true;
-      } catch (IOException e) {
-        // success==false if we hit an exception
+    boolean success = false;
+    try {
+      buffer.reset();
+      success = true;
+    } catch (IOException e) {
+      // success==false if we hit an exception
+    }
+    if (success) {
+      if (buffer.hasAttribute(TermAttribute.class)) {
+        termAtt = (TermAttribute) buffer.getAttribute(TermAttribute.class);
       }
-      if (success) {
-        if (buffer.hasAttribute(TermAttribute.class)) {
-          termAtt = (TermAttribute) buffer.getAttribute(TermAttribute.class);
-        }
-        if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-          posIncrAtt = (PositionIncrementAttribute) buffer.getAttribute(PositionIncrementAttribute.class);
-        }
+      if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
+        posIncrAtt = (PositionIncrementAttribute) buffer.getAttribute(PositionIncrementAttribute.class);
       }
-    } else {
-      reusableToken = new org.apache.lucene.analysis.Token();
     }
 
     int positionCount = 0;
     boolean severalTokensAtSamePosition = false;
 
+    boolean useNewAPI = true;
+    boolean hasMoreTokens = false;
+    try {
+      hasMoreTokens = buffer.incrementToken();
+    } catch (IOException e) {
+      // ignore
+    } catch (UnsupportedTokenStreamAPIException e) {
+      // fall back to old API
+      useNewAPI = false;
+    }
+    
     if (useNewAPI) {
       if (termAtt != null) {
         try {
-          while (buffer.incrementToken()) {
+          while (hasMoreTokens) {
             numTokens++;
             int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
             if (positionIncrement != 0) {
@@ -566,12 +571,16 @@
             } else {
               severalTokensAtSamePosition = true;
             }
+            hasMoreTokens = buffer.incrementToken();
           }
         } catch (IOException e) {
           // ignore
         }
       }
     } else {
+      // use pre-2.9 TokenStream API
+      reusableToken = new org.apache.lucene.analysis.Token();
+      
       while (true) {
         try {
           nextToken = buffer.next(reusableToken);
Index: src/java/org/apache/lucene/search/QueryTermVector.java
===================================================================
--- src/java/org/apache/lucene/search/QueryTermVector.java	(revision 784591)
+++ src/java/org/apache/lucene/search/QueryTermVector.java	(working copy)
@@ -29,6 +29,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.TokenStream.UnsupportedTokenStreamAPIException;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.TermFreqVector;
 
@@ -59,13 +60,28 @@
       {
         List terms = new ArrayList();
         try {
-          if (stream.useNewAPI()) {
-            stream.reset();
-            TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
-            while (stream.incrementToken()) {
+          boolean useNewAPI = true;
+          boolean hasMoreTokens = false;
+          
+          stream.reset(); 
+          TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+
+          try {
+            hasMoreTokens = stream.incrementToken();
+          } catch (IOException e) {
+            // ignore
+          } catch (UnsupportedTokenStreamAPIException e) {
+            // fall back to old API
+            useNewAPI = false;
+          }
+
+          if (useNewAPI) {
+            while (hasMoreTokens) {
               terms.add(termAtt.term());
+              hasMoreTokens = stream.incrementToken();
             }
           } else {  
+            // use pre-2.9 TokenStream API
             final Token reusableToken = new Token();
             for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
               terms.add(nextToken.term());
Index: src/java/org/apache/lucene/util/Attribute.java
===================================================================
--- src/java/org/apache/lucene/util/Attribute.java	(revision 784591)
+++ src/java/org/apache/lucene/util/Attribute.java	(working copy)
@@ -1,95 +1,5 @@
 package org.apache.lucene.util;
 
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+public interface Attribute {
 
-import java.io.Serializable;
-
-/**
- * Base class for Attributes that can be added to a 
- * {@link org.apache.lucene.util.AttributeSource}.
- * <p>
- * Attributes are used to add data in a dynamic, yet type-safe way to a source
- * of usually streamed objects, e. g. a {@link org.apache.lucene.analysis.TokenStream}.
- * <p><font color="#FF0000">
- * WARNING: The status of the new TokenStream, AttributeSource and Attributes is experimental. 
- * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
- * We will make our best efforts to keep the APIs backwards-compatible.</font>
- */
-public abstract class Attribute implements Cloneable, Serializable {  
-  /**
-   * Clears the values in this Attribute and resets it to its 
-   * default value.
-   */
-  public abstract void clear();
-  
-  /**
-   * Subclasses must implement this method and should follow a syntax
-   * similar to this one:
-   * 
-   * <pre>
-   *   public String toString() {
-   *     return "start=" + startOffset + ",end=" + endOffset;
-   *   }
-   * </pre>
-   */
-  public abstract String toString();
-  
-  /**
-   * Subclasses must implement this method and should compute
-   * a hashCode similar to this:
-   * <pre>
-   *   public int hashCode() {
-   *     int code = startOffset;
-   *     code = code * 31 + endOffset;
-   *     return code;
-   *   }
-   * </pre> 
-   * 
-   * see also {@link #equals(Object)}
-   */
-  public abstract int hashCode();
-  
-  /**
-   * All values used for computation of {@link #hashCode()} 
-   * should be checked here for equality.
-   * 
-   * see also {@link Object#equals(Object)}
-   */
-  public abstract boolean equals(Object other);
-  
-  /**
-   * Copies the values from this Attribute into the passed-in
-   * target attribute. The type of the target must match the type
-   * of this attribute. 
-   */
-  public abstract void copyTo(Attribute target);
-    
-  /**
-   * Shallow clone. Subclasses must override this if they 
-   * need to clone any members deeply,
-   */
-  public Object clone() {
-    Object clone = null;
-    try {
-      clone = super.clone();
-    } catch (CloneNotSupportedException e) {
-      throw new RuntimeException(e);  // shouldn't happen
-    }
-    return clone;
-  }
 }
Index: src/java/org/apache/lucene/util/AttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/util/AttributeImpl.java	(revision 784591)
+++ src/java/org/apache/lucene/util/AttributeImpl.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.Serializable;
+import java.lang.reflect.Field;
 
 /**
  * Base class for Attributes that can be added to a 
@@ -30,7 +31,7 @@
  * The APIs introduced in these classes with Lucene 2.9 might change in the future. 
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
-public abstract class Attribute implements Cloneable, Serializable {  
+public abstract class AttributeImpl implements Cloneable, Serializable {  
   /**
    * Clears the values in this Attribute and resets it to its 
    * default value.
@@ -38,16 +39,43 @@
   public abstract void clear();
   
   /**
-   * Subclasses must implement this method and should follow a syntax
-   * similar to this one:
+   * The default implementation of this method accesses all declared
+   * fields of this object and prints the values in the following syntax:
    * 
    * <pre>
    *   public String toString() {
    *     return "start=" + startOffset + ",end=" + endOffset;
    *   }
    * </pre>
+   * 
+   * This method may be overridden by subclasses.
    */
-  public abstract String toString();
+  public String toString() {
+    StringBuffer buffer = new StringBuffer();
+    Class clazz = this.getClass();
+    Field[] fields = clazz.getDeclaredFields();
+    try {
+      for (int i = 0; i < fields.length; i++) {
+        Field f = fields[i];
+        f.setAccessible(true);
+        Object value = f.get(this);
+        if (value == null) {
+          buffer.append(f.getName() + "=null");
+        } else {
+          buffer.append(f.getName() + "=" + value);
+        }
+        if (i < fields.length - 1) {
+          buffer.append(',');
+        }
+      }
+    } catch (IllegalAccessException e) {
+      // this should never happen, because we're just accessing fields
+      // from 'this'
+      throw new RuntimeException(e);
+    }
+    
+    return buffer.toString();
+  }
   
   /**
    * Subclasses must implement this method and should compute
@@ -77,7 +105,7 @@
    * target attribute. The type of the target must match the type
    * of this attribute. 
    */
-  public abstract void copyTo(Attribute target);
+  public abstract void copyTo(AttributeImpl target);
     
   /**
    * Shallow clone. Subclasses must override this if they 

Property changes on: src/java/org/apache/lucene/util/AttributeImpl.java
___________________________________________________________________
Added: svn:mergeinfo
   Merged /lucene/java/branches/lucene_2_4/src/java/org/apache/lucene/util/Attribute.java:r748824

Index: src/java/org/apache/lucene/util/AttributeSource.java
===================================================================
--- src/java/org/apache/lucene/util/AttributeSource.java	(revision 784591)
+++ src/java/org/apache/lucene/util/AttributeSource.java	(working copy)
@@ -19,13 +19,14 @@
 
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.lucene.analysis.TokenStream;
 
-
 /**
- * An AttributeSource contains a list of different {@link Attribute}s,
+ * An AttributeSource contains a list of different {@link AttributeImpl}s,
  * and methods to add and get them. There can only be a single instance
  * of an attribute in the same AttributeSource instance. This is ensured
  * by passing in the actual type of the Attribute (Class&lt;Attribute&gt;) to 
@@ -39,7 +40,103 @@
  * We will make our best efforts to keep the APIs backwards-compatible.</font>
  */
 public class AttributeSource {
+  public static abstract class AttributeFactory {
+    public abstract AttributeImpl createAttributeInstance(Class attClass);
+    
+    public static final AttributeFactory DefaultAttributeFactory = new AttributeFactory() {
+      public AttributeImpl createAttributeInstance(Class attClass) {
+        try {
+          return (AttributeImpl) resolveClassName(attClass.getName()).newInstance();
+        } catch (InstantiationException e) {
+          throw new IllegalArgumentException("Could not instantiate class " + attClass);
+        } catch (IllegalAccessException e) {
+          throw new IllegalArgumentException("Could not instantiate class " + attClass);      
+        }
+      }
+      
+      protected Class resolveClassName(String attClassName) {
+        try {
+          attClassName += "Impl";
+          return Class.forName(attClassName);
+        } catch (ClassNotFoundException e) {
+          throw new IllegalArgumentException("Could not find implementing class " + attClassName);
+        }
+      }
+    };
+  }
+  
+  public static final class State implements Cloneable {
+    private AttributeImpl attribute;
+    private State next;
+    
+    public Object clone() {
+      State clone = new State();
+      clone.attribute = (AttributeImpl) attribute.clone();
+      
+      if (next != null) {
+        clone.next = (State) next.clone();
+      }
+      
+      return clone;
+    }
+  }
+  
+  private State currentState;
+  
+  private void computeCurrentState() {
+    // find all unique AttributeImpls
+    Set impls = new LinkedHashSet();
+    impls.addAll(attributes.values());
+    Iterator it = impls.iterator();
+    currentState = new State();
+    
+    State c = currentState;
+    c.attribute = (AttributeImpl) it.next();
+    while (it.hasNext()) {
+      c.next = new State();
+      c = c.next;
+      c.attribute = (AttributeImpl) it.next();
+    }        
+  }
+  
+  public State captureState() {
+    if (currentState == null) {
+      computeCurrentState();
+    }
+    return (State) this.currentState.clone();
+  }
+  
   /**
+   * Restores the given state by copying the values of all attributes
+   * that the state contains into the corresponding attributes of this
+   * AttributeSource. This AttributeSource must contain a corresponding
+   * instance for each attribute contained in the state.
+   * <p>
+   * Note that this method does not affect attributes of this AttributeSource
+   * that are not contained in the state. In other words, if for example
+   * this AttributeSource contains an OffsetAttribute, but the state doesn't,
+   * then the value of the OffsetAttribute remains unchanged. It might be
+   * desirable to reset its value to the default, in which case the caller
+   * should first call {@link TokenStream#clearAttributes()}.
+   */
+  public void restoreState(State state) {
+    if (currentState == null) {
+      computeCurrentState();
+    }
+    
+    State source = state;
+    State target = currentState;
+    
+    do {
+      assert source != null && target != null && source.attribute.getClass() == target.attribute.getClass();
+      source.attribute.copyTo(target.attribute);
+      source = source.next;
+      target = target.next;
+    } while (source != null);
+  }
+
+    
+  /**
    * An AttributeAcceptor defines only a single method {@link #accept(Class)}.
    * It can be used for e. g. buffering purposes to specify which attributes
    * to buffer. 
@@ -56,13 +153,20 @@
     public boolean accept(Class attClass) {return true;}      
   };
 
+  private AttributeFactory factory = AttributeFactory.DefaultAttributeFactory;
+  
   /**
    * Holds the Class&lt;Attribute&gt; -> Attribute mapping
    */
   protected Map attributes;
 
+  protected void initialize() {
+    
+  }
+  
   public AttributeSource() {
     this.attributes = new LinkedHashMap();
+    initialize();
   }
   
   public AttributeSource(AttributeSource input) {
@@ -70,8 +174,17 @@
       throw new IllegalArgumentException("input AttributeSource must not be null");
     }
     this.attributes = input.attributes;
+    initialize();
   }
   
+  public void setAttributeFactory(AttributeFactory factory) {
+    this.factory = factory;
+  }
+  
+  public AttributeFactory getAttributeFactory() {
+    return this.factory;
+  }
+  
   /** Returns an iterator that iterates the attributes 
    * in the same order they were added in.
    */
@@ -79,24 +192,39 @@
     return attributes.values().iterator();
   }
   
+  public void addAttributeImpl(AttributeImpl att) {
+    // find all interfaces that this attribute instance implements
+    // and that extend the Attribute interface
+    Class clazz = att.getClass(); 
+    do {
+      Class[] interfaces = att.getClass().getInterfaces();
+      for (int i = 0; i < interfaces.length; i++) {
+        Class curInterface = interfaces[i];
+        if (Attribute.class.isAssignableFrom(curInterface)) {
+          // Attribute is a superclass of this interface
+          if (!attributes.containsKey(curInterface)) {
+            // invalidate state to force recomputation in captureState()
+            this.currentState = null;
+            attributes.put(curInterface, att);
+          }
+        }
+      }
+      
+      clazz = clazz.getSuperclass();
+    } while (clazz != null);    
+  }
+  
   /**
    * The caller must pass in a Class&lt;? extends Attribute&gt; value.
    * This method first checks if an instance of that class is 
    * already in this AttributeSource and returns it. Otherwise a
    * new instance is created, added to this AttributeSource and returned. 
    */
-  public Attribute addAttribute(Class attClass) {
-    Attribute att = (Attribute) attributes.get(attClass);
+  public AttributeImpl addAttribute(Class attClass) {
+    AttributeImpl att = (AttributeImpl) attributes.get(attClass);
     if (att == null) {
-      try {
-        att = (Attribute) attClass.newInstance();
-      } catch (InstantiationException e) {
-        throw new IllegalArgumentException("Could not instantiate class " + attClass);
-      } catch (IllegalAccessException e) {
-        throw new IllegalArgumentException("Could not instantiate class " + attClass);      
-      }
-      
-      attributes.put(attClass, att);
+      att = this.factory.createAttributeInstance(attClass);
+      addAttributeImpl(att);
     }
     return att;
   }
@@ -121,8 +249,8 @@
    * @throws IllegalArgumentException if this AttributeSource does not contain the
    *         Attribute
    */
-  public Attribute getAttribute(Class attClass) {
-    Attribute att = (Attribute) this.attributes.get(attClass);
+  public AttributeImpl getAttribute(Class attClass) {
+    AttributeImpl att = (AttributeImpl) this.attributes.get(attClass);
     if (att == null) {
       throw new IllegalArgumentException("This token does not have the attribute '" + attClass + "'.");
     }
@@ -132,12 +260,12 @@
   
   /**
    * Resets all Attributes in this AttributeSource by calling
-   * {@link Attribute#clear()} on each Attribute.
+   * {@link AttributeImpl#clear()} on each Attribute.
    */
   public void clearAttributes() {
-    Iterator it = getAttributesIterator();
+    Iterator it = this.attributes.values().iterator();
     while (it.hasNext()) {
-      ((Attribute) it.next()).clear();
+      ((AttributeImpl) it.next()).clear();
     }
   }
   
@@ -145,27 +273,16 @@
    * Captures the current state of the passed in TokenStream.
    * <p>
    * This state will contain all of the passed in TokenStream's
-   * {@link Attribute}s. If only a subset of the attributes is needed
-   * please use {@link #captureState(AttributeAcceptor)} 
+   * {@link AttributeImpl}s which the {@link AttributeAcceptor} accepts. 
    */
-  public AttributeSource captureState() {
-    return captureState(AllAcceptor);
-  }
-
-  /**
-   * Captures the current state of the passed in TokenStream.
-   * <p>
-   * This state will contain all of the passed in TokenStream's
-   * {@link Attribute}s which the {@link AttributeAcceptor} accepts. 
-   */
   public AttributeSource captureState(AttributeAcceptor acceptor) {
     AttributeSource state = new AttributeSource();
      
     Iterator it = getAttributesIterator();
     while(it.hasNext()) {
-      Attribute att = (Attribute) it.next();
+      AttributeImpl att = (AttributeImpl) it.next();
       if (acceptor.accept(att.getClass())) {
-        Attribute clone = (Attribute) att.clone();
+        AttributeImpl clone = (AttributeImpl) att.clone();
         state.attributes.put(att.getClass(), clone);
       }
     }
@@ -173,28 +290,6 @@
     return state;
   }
   
-  /**
-   * Restores this state by copying the values of all attributes 
-   * that this state contains into the attributes of the targetStream.
-   * The targetStream must contain a corresponding instance for each argument
-   * contained in this state.
-   * <p>
-   * Note that this method does not affect attributes of the targetStream
-   * that are not contained in this state. In other words, if for example
-   * the targetStream contains an OffsetAttribute, but this state doesn't, then
-   * the value of the OffsetAttribute remains unchanged. It might be desirable to
-   * reset its value to the default, in which case the caller should first
-   * call {@link TokenStream#clearAttributes()} on the targetStream.   
-   */
-  public void restoreState(AttributeSource target) {
-    Iterator it = getAttributesIterator();
-    while (it.hasNext()) {
-      Attribute att = (Attribute) it.next();
-      Attribute targetAtt = target.getAttribute(att.getClass());
-      att.copyTo(targetAtt);
-    }
-  }
-  
   public int hashCode() {
     int code = 0;
     if (hasAttributes()) {
@@ -228,7 +323,7 @@
         while (it.hasNext()) {
           Class attName = it.next().getClass();
           
-          Attribute otherAtt = (Attribute) other.attributes.get(attName);
+          AttributeImpl otherAtt = (AttributeImpl) other.attributes.get(attName);
           if (otherAtt == null || !otherAtt.equals(attributes.get(attName))) {
             return false;
           }
Index: src/test/org/apache/lucene/index/TestDocumentWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestDocumentWriter.java	(revision 784591)
+++ src/test/org/apache/lucene/index/TestDocumentWriter.java	(working copy)
@@ -141,11 +141,11 @@
       public TokenStream tokenStream(String fieldName, Reader reader) {
         return new TokenFilter(new WhitespaceTokenizer(reader)) {
           boolean first=true;
-          AttributeSource state;
+          AttributeSource.State state;
 
           public boolean incrementToken() throws IOException {
             if (state != null) {
-              state.restoreState(this);
+              restoreState(state);
               payloadAtt.setPayload(null);
               posIncrAtt.setPositionIncrement(0);
               termAtt.setTermBuffer(new char[]{'b'}, 0, 1);
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 784591)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -3540,14 +3540,14 @@
     tokens.addAttribute(TermAttribute.class);
     tokens.addAttribute(PositionIncrementAttribute.class);
 
-    AttributeSource state = new AttributeSource();
+    TokenStream state = new TokenStream(){};
     TermAttribute termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
     PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
     termAtt.setTermBuffer("a");
     posIncrAtt.setPositionIncrement(0);
     tokens.add(state);
 
-    state = new AttributeSource();
+    state = new TokenStream(){};
     termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
     posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
 
@@ -3555,7 +3555,7 @@
     posIncrAtt.setPositionIncrement(1);
     tokens.add(state);
     
-    state = new AttributeSource();
+    state = new TokenStream(){};
     termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
     posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
 
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	(revision 784591)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	(working copy)
@@ -320,6 +320,10 @@
       public Token next(final Token reusableToken) {
         return null;
       }
+      
+      public boolean incrementToken() {
+        return false;
+      }
     }
   }
 
Index: src/test/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCase.java	(revision 784591)
+++ src/test/org/apache/lucene/util/LuceneTestCase.java	(working copy)
@@ -44,7 +44,6 @@
 
   protected void setUp() throws Exception {
     ConcurrentMergeScheduler.setTestMode();
-    TokenStream.setUseNewAPIDefault(true);
   }
 
   protected void tearDown() throws Exception {
Index: test/file
===================================================================
