Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java
===================================================================
--- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.analysis.uima.UIMAAnnotationsTokenizer;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -46,4 +47,9 @@
   public Tokenizer create(Reader input) {
     return new UIMAAnnotationsTokenizer(descriptorPath, tokenType, input);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java
===================================================================
--- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.uima.UIMATypeAwareAnnotationsTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -48,4 +49,9 @@
   public Tokenizer create(Reader input) {
     return new UIMATypeAwareAnnotationsTokenizer(descriptorPath, tokenType, featurePath, input);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java	(working copy)
@@ -32,6 +32,7 @@
 import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
 import org.apache.lucene.analysis.ja.dict.UserDictionary;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
@@ -93,6 +94,11 @@
     return new JapaneseTokenizer(input, userDictionary, discardPunctuation, mode);
   }
   
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
+  
   private Mode getMode(Map<String, String> args) {
     String mode = args.get(MODE);
     if (mode != null) {
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java	(working copy)
@@ -30,6 +30,7 @@
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.IOUtils;
 
 import com.ibm.icu.lang.UCharacter;
@@ -148,4 +149,9 @@
     assert config != null : "inform must be called first!";
     return new ICUTokenizer(input, config);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java
===================================================================
--- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java	(working copy)
@@ -22,6 +22,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.cn.smart.SentenceTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /**
  * Factory for the SmartChineseAnalyzer {@link SentenceTokenizer}
@@ -32,4 +33,9 @@
   public Tokenizer create(Reader input) {
     return new SentenceTokenizer(input);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    return new SentenceTokenizer(source, input);
+  }
 }
Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
===================================================================
--- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java	(revision 1424481)
+++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java	(working copy)
@@ -33,6 +33,7 @@
 import org.apache.lucene.analysis.util.StringMockResourceLoader;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /**
  * Sanity check some things about all factories,
@@ -149,6 +150,11 @@
     public Tokenizer create(Reader input) {
       return new MockTokenizer(input);
     }
+    
+    @Override
+    public Tokenizer create(AttributeSource source, Reader input) {
+      throw new UnsupportedOperationException();
+    }
   };
   
   private static class FactoryAnalyzer extends Analyzer {
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java	(working copy)
@@ -25,6 +25,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /**
  * Factory for {@link PatternTokenizer}.
@@ -98,4 +99,9 @@
       throw new RuntimeException("IOException thrown creating PatternTokenizer instance", ex);
     }
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Set;
@@ -63,4 +64,8 @@
 
   /** Creates a TokenStream of the specified input */
   public abstract Tokenizer create(Reader input);
+  
+  /** Creates a Tokenizer of the specified input using the given AttributeSource */
+  public abstract Tokenizer create(AttributeSource source, Reader input);
+  
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -45,4 +46,9 @@
   public LetterTokenizer create(Reader input) {
     return new LetterTokenizer(luceneMatchVersion, input);
   }
+  
+  @Override
+  public LetterTokenizer create(AttributeSource source, Reader input) {
+    return new LetterTokenizer(luceneMatchVersion, source, input);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 
@@ -37,4 +38,9 @@
   public KeywordTokenizer create(Reader input) {
     return new KeywordTokenizer(input);
   }
+  
+  @Override
+  public KeywordTokenizer create(AttributeSource source, Reader input) {
+    return new KeywordTokenizer(source, input, KeywordTokenizer.DEFAULT_BUFFER_SIZE);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java	(working copy)
@@ -21,6 +21,7 @@
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -46,6 +47,11 @@
   public LowerCaseTokenizer create(Reader input) {
     return new LowerCaseTokenizer(luceneMatchVersion,input);
   }
+  
+  @Override
+  public LowerCaseTokenizer create(AttributeSource source, Reader input) {
+    return new LowerCaseTokenizer(luceneMatchVersion, source, input);
+  }
 
   @Override
   public AbstractAnalysisFactory getMultiTermComponent() {
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -44,4 +45,9 @@
   public WhitespaceTokenizer create(Reader input) {
     return new WhitespaceTokenizer(luceneMatchVersion,input);
   }
+  
+  @Override
+  public WhitespaceTokenizer create(AttributeSource source, Reader input) {
+    return new WhitespaceTokenizer(luceneMatchVersion, source, input);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -54,4 +55,12 @@
     tokenizer.setMaxTokenLength(maxTokenLength);
     return tokenizer;
   }
+  
+  @Override
+  public StandardTokenizer create(AttributeSource source, Reader input) {
+    StandardTokenizer tokenizer
+      = new StandardTokenizer(luceneMatchVersion, source, input); 
+    tokenizer.setMaxTokenLength(maxTokenLength);
+    return tokenizer;
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -54,4 +55,11 @@
     tokenizer.setMaxTokenLength(maxTokenLength);
     return tokenizer;
   }
+  
+  @Override
+  public UAX29URLEmailTokenizer create(AttributeSource source, Reader input) {
+    UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(luceneMatchVersion, source, input); 
+    tokenizer.setMaxTokenLength(maxTokenLength);
+    return tokenizer;
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java	(working copy)
@@ -21,6 +21,7 @@
 import org.apache.lucene.analysis.standard.ClassicTokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -55,4 +56,11 @@
     tokenizer.setMaxTokenLength(maxTokenLength);
     return tokenizer;
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    ClassicTokenizer tokenizer = new ClassicTokenizer(luceneMatchVersion, source, input); 
+    tokenizer.setMaxTokenLength(maxTokenLength);
+    return tokenizer;
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java	(working copy)
@@ -18,10 +18,12 @@
  */
 
 import java.io.Reader;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;
+import org.apache.lucene.util.AttributeSource;
 
 /** 
  * Factory for {@link WikipediaTokenizer}.
@@ -39,4 +41,10 @@
   public Tokenizer create(Reader input) {
     return new WikipediaTokenizer(input);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    return new WikipediaTokenizer(source, input, WikipediaTokenizer.TOKENS_ONLY, 
+        Collections.<String>emptySet());
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.cjk.CJKTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 
@@ -37,5 +38,10 @@
   public CJKTokenizer create(Reader in) {
     return new CJKTokenizer(in);
   }
+  
+  @Override
+  public CJKTokenizer create(AttributeSource source, Reader in) {
+    return new CJKTokenizer(source, in);
+  }
 }
 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLetterTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLetterTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLetterTokenizerFactory.java	(working copy)
@@ -20,9 +20,11 @@
 import java.io.Reader;
 import java.util.Map;
 
+import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ru.RussianLetterTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizerFactory; // javadocs
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /** @deprecated Use {@link StandardTokenizerFactory} instead.
  *  This tokenizer has no Russian-specific functionality.
@@ -43,5 +45,10 @@
   public RussianLetterTokenizer create(Reader in) {
     return new RussianLetterTokenizer(luceneMatchVersion,in);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    return new RussianLetterTokenizer(luceneMatchVersion, source, input);
+  }
 }
 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java	(working copy)
@@ -24,6 +24,7 @@
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
 import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /**
  * Factory for {@link PathHierarchyTokenizer}. 
@@ -127,6 +128,11 @@
     }
     return new PathHierarchyTokenizer(input, delimiter, replacement, skip);
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
 
 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizerFactory.java	(working copy)
@@ -22,6 +22,7 @@
 import org.apache.lucene.analysis.cn.ChineseTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizerFactory; // javadocs
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 /** 
  * Factory for {@link ChineseTokenizer}
@@ -33,5 +34,10 @@
   public ChineseTokenizer create(Reader in) {
     return new ChineseTokenizer(in);
   }
+  
+  @Override
+  public ChineseTokenizer create(AttributeSource source, Reader in) {
+    return new ChineseTokenizer(source, in);
+  }
 }
 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicLetterTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.ar.ArabicLetterTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -40,4 +41,9 @@
   public ArabicLetterTokenizer create(Reader input) {
     return new ArabicLetterTokenizer(luceneMatchVersion, input);
   }
+  
+  @Override
+  public ArabicLetterTokenizer create(AttributeSource source, Reader input) {
+    return new ArabicLetterTokenizer(luceneMatchVersion, source, input);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -54,4 +55,11 @@
   public NGramTokenizer create(Reader input) {
     return new NGramTokenizer(input, minGramSize, maxGramSize);
   }
+  
+  /** Creates the {@link TokenStream} of n-grams from the given {@link Reader}
+   * and using the given {@link AttributeSource}. */
+  @Override
+  public NGramTokenizer create(AttributeSource source, Reader input) {
+    return new NGramTokenizer(source, input, minGramSize, maxGramSize);
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java	(revision 1424481)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 
 import java.io.Reader;
 import java.util.Map;
@@ -59,4 +60,9 @@
   public EdgeNGramTokenizer create(Reader input) {
     return new EdgeNGramTokenizer(input, side, minGramSize, maxGramSize);
   }
+  
+  @Override
+  public EdgeNGramTokenizer create(AttributeSource source, Reader input) {
+    return new EdgeNGramTokenizer(source, input, side, minGramSize, maxGramSize);
+  }
 }
Index: solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java
===================================================================
--- solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java	(revision 1424481)
+++ solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java	(working copy)
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 
 /**
@@ -60,4 +61,9 @@
     t.setEnableChecks(enableChecks);
     return t;
   }
+  
+  @Override
+  public Tokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
\ No newline at end of file
Index: solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
===================================================================
--- solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java	(revision 1424481)
+++ solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java	(working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.schema.DateField;
 import static org.apache.solr.schema.TrieField.TrieTypes;
@@ -52,6 +53,11 @@
   public TrieTokenizer create(Reader input) {
     return new TrieTokenizer(input, type, precisionStep, TrieTokenizer.getNumericTokenStream(precisionStep));
   }
+  
+  @Override
+  public TrieTokenizer create(AttributeSource source, Reader input) {
+    throw new UnsupportedOperationException();
+  }
 }
 
 final class TrieTokenizer extends Tokenizer {
