emptySet());
+ }
}
Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
===================================================================
--- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java (revision 1455623)
+++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java (working copy)
@@ -146,7 +146,7 @@
// some silly classes just so we can use checkRandomData
private TokenizerFactory assertingTokenizer = new TokenizerFactory() {
@Override
- public Tokenizer create(Reader input) {
+ public MockTokenizer create(Reader input) {
return new MockTokenizer(input);
}
};
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java (revision 1455623)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java (working copy)
@@ -144,7 +144,7 @@
}
@Override
- public Tokenizer create(Reader input) {
+ public ICUTokenizer create(Reader input) {
assert config != null : "inform must be called first!";
return new ICUTokenizer(input, config);
}
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java (revision 1455623)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java (working copy)
@@ -89,7 +89,7 @@
}
@Override
- public Tokenizer create(Reader input) {
+ public JapaneseTokenizer create(Reader input) {
return new JapaneseTokenizer(input, userDictionary, discardPunctuation, mode);
}
Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java
===================================================================
--- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java (revision 1455623)
+++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SentenceTokenizer.java (working copy)
@@ -52,10 +52,6 @@
super(reader);
}
- public SentenceTokenizer(AttributeSource source, Reader reader) {
- super(source, reader);
- }
-
public SentenceTokenizer(AttributeFactory factory, Reader reader) {
super(factory, reader);
}
Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java
===================================================================
--- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java (revision 1455623)
+++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java (working copy)
@@ -19,9 +19,8 @@
import java.io.Reader;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.cn.smart.SentenceTokenizer;
import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.AttributeSource.AttributeFactory;
/**
* Factory for the SmartChineseAnalyzer {@link SentenceTokenizer}
@@ -29,7 +28,12 @@
*/
public class SmartChineseSentenceTokenizerFactory extends TokenizerFactory {
@Override
- public Tokenizer create(Reader input) {
+ public SentenceTokenizer create(Reader input) {
return new SentenceTokenizer(input);
}
+
+ @Override
+ public SentenceTokenizer create(AttributeFactory factory, Reader input) {
+ return new SentenceTokenizer(factory, input);
+ }
}
Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java
===================================================================
--- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java (revision 1455623)
+++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java (working copy)
@@ -17,9 +17,7 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.analysis.uima.UIMAAnnotationsTokenizer;
import java.io.Reader;
import java.util.HashMap;
@@ -54,7 +52,7 @@
}
@Override
- public Tokenizer create(Reader input) {
+ public UIMAAnnotationsTokenizer create(Reader input) {
return new UIMAAnnotationsTokenizer(descriptorPath, tokenType, configurationParameters, input);
}
}
Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java
===================================================================
--- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java (revision 1455623)
+++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java (working copy)
@@ -17,7 +17,6 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.util.TokenizerFactory;
import java.io.Reader;
@@ -55,7 +54,7 @@
}
@Override
- public Tokenizer create(Reader input) {
+ public UIMATypeAwareAnnotationsTokenizer create(Reader input) {
return new UIMATypeAwareAnnotationsTokenizer(descriptorPath, tokenType, featurePath, configurationParameters, input);
}
}
Index: lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java (revision 1455623)
+++ lucene/core/src/java/org/apache/lucene/analysis/Tokenizer.java (working copy)
@@ -47,13 +47,6 @@
this.input = input;
}
- /** Construct a token stream processing the given input using the given AttributeSource. */
- protected Tokenizer(AttributeSource source, Reader input) {
- super(source);
- assert input != null: "input must not be null";
- this.input = input;
- }
-
/**
* {@inheritDoc}
*
Index: solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
===================================================================
--- solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java (revision 1455623)
+++ solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java (working copy)
@@ -22,12 +22,15 @@
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.util.Attribute;
+import org.apache.lucene.util.AttributeImpl;
import org.apache.solr.common.SolrException;
import org.apache.solr.schema.DateField;
import static org.apache.solr.schema.TrieField.TrieTypes;
import java.io.IOException;
import java.io.Reader;
+import java.util.Iterator;
/**
* Tokenizer for trie fields. It uses NumericTokenStream to create multiple trie encoded string per number.
@@ -71,9 +74,18 @@
return new NumericTokenStream(precisionStep);
}
- public TrieTokenizer(Reader input, TrieTypes type, NumericTokenStream ts) {
- // must share the attribute source with the NumericTokenStream we delegate to
- super(ts, input);
+ public TrieTokenizer(Reader input, TrieTypes type, final NumericTokenStream ts) {
+ // Häckidy-Hick-Hack: must share the attributes with the NumericTokenStream we delegate to, so we create a fake factory:
+ super(new AttributeFactory() {
+ @Override
+ public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
+ return (AttributeImpl) ts.addAttribute(attClass);
+ }
+ }, input);
+ // add all attributes:
+ for (Iterator<Class<? extends Attribute>> it = ts.getAttributeClassesIterator(); it.hasNext();) {
+ addAttribute(it.next());
+ }
this.type = type;
this.ts = ts;
// dates tend to be longer, especially when math is involved
Index: solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java
===================================================================
--- solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java (revision 1455623)
+++ solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java (working copy)
@@ -20,7 +20,6 @@
import java.io.Reader;
import java.util.Map;
-import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockCharFilter;
import org.apache.lucene.analysis.util.CharFilterFactory;
@@ -41,7 +40,7 @@
}
@Override
- public CharFilter create(Reader input) {
+ public MockCharFilter create(Reader input) {
return new MockCharFilter(input, remainder);
}
}
Index: solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java
===================================================================
--- solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java (revision 1455623)
+++ solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java (working copy)
@@ -55,7 +55,7 @@
@Override
- public Tokenizer create(Reader input) {
+ public MockTokenizer create(Reader input) {
MockTokenizer t = new MockTokenizer(input, pattern, false);
t.setEnableChecks(enableChecks);
return t;