Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestMultiWordSynonyms.java
===================================================================
--- lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestMultiWordSynonyms.java	(revision 1393962)
+++ lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestMultiWordSynonyms.java	(working copy)
@@ -17,22 +17,95 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.synonym.SynonymFilterFactory;
+import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.StringMockResourceLoader;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.synonym.SynonymFilterFactory.SynonymParser;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.CharsRef;
 
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;
 
-/**
- * @since solr 1.4
- */
+
 public class TestMultiWordSynonyms extends BaseTokenStreamTestCase {
   
+  private StringMockResourceLoader getSyn() {
+    return new StringMockResourceLoader(
+        "hubble\0space\0telescope,HST,hs telescope\n" +
+        "foo\0bar,foo ba,fu ba,foobar\n" +
+        "foo\0baz,fu ba");
+  }
+  
+  /*
+   * Always include the source token before the synonym
+   */
+  public static class TestParserInclOrig extends SynonymFilterFactory.SynonymBuilderFactory {
+    protected SynonymParser getParser(Analyzer analyzer) {
+      return new SolrSynonymParser(true, true, analyzer) {
+        @Override
+        public void add(CharsRef input, CharsRef output, boolean includeOrig) {
+          super.add(input, output, true);
+        }
+      };
+    }
+  }
+  
+  
+  /*
+   * Recognize "multi\0word\0synonyms" (null bytes in the input string) 
+   * but emit "multi word synonyms" in the output
+   */
+  public static class TestParserReplaceNulls extends SynonymFilterFactory.SynonymBuilderFactory {
+    protected SynonymParser getParser(Analyzer analyzer) {
+      return new SolrSynonymParser(true, true, analyzer) {
+        @Override
+        public void add(CharsRef input, CharsRef output, boolean includeOrig) {
+          super.add(input, replaceNulls(output), includeOrig);
+        }
+      };
+    }
+  }
+  
+  public static CharsRef replaceNulls(CharsRef charsRef) {
+    CharsRef sanChar = CharsRef.deepCopyOf(charsRef);
+    final int end = sanChar.offset + sanChar.length;
+    for(int idx=sanChar.offset+1;idx<end;idx++) {
+      if (sanChar.chars[idx] == SynonymMap.WORD_SEPARATOR) {
+        sanChar.chars[idx] = ' ';
+      }
+    }
+    return sanChar;
+  }
+  
+  /*
+   * This parser is useful if you want to index multi-token synonyms (as one token)
+   * as well as their components. I.e. "hubble space telescope was..." will be 
+   * indexed as
+   * 0: hubble|hubble space telescope|HST
+   * 1: space
+   * 2: telescope
+   */
+  public static class TestParserReplaceNullsInclOrig extends SynonymFilterFactory.SynonymBuilderFactory {
+    protected SynonymParser getParser(Analyzer analyzer) {
+      return new SolrSynonymParser(true, true, analyzer) {
+        @Override
+        public void add(CharsRef input, CharsRef output, boolean includeOrig) {
+          super.add(input, replaceNulls(output), true);
+        }
+      };
+    }
+  }
+  
+  /**
+   * @since solr 1.4
+   */
   public void testMultiWordSynonyms() throws IOException {
     SynonymFilterFactory factory = new SynonymFilterFactory();
     Map<String,String> args = new HashMap<String,String>();
@@ -44,4 +117,308 @@
     // This fails because ["e","e"] is the value of the token stream
     assertTokenStreamContents(ts, new String[] { "a", "e" });
   }
+  
+  /*
+   * This is the best configuration for multi-token query-time synonym expansion.
+   * 
+   * The parser searches for synonyms ignoring case, but in the output returns
+   * the original string (important for more complex tokenizer chains, i.e. 
+   * when synonyms should be found first, then acronyms detected)
+   * 
+   * The parser also returns source tokens for the multi-token group, but
+   * 'eats' the source token when a single-token synonym is present. 
+   * 
+   */
+  public static class TestParserReplaceNullsCustomInclOrigAnalyzer extends SynonymFilterFactory.SynonymBuilderFactory {
+    public void inform(ResourceLoader loader) throws IOException {
+      args.put("ignoreCase", "false");
+    }
+    protected SynonymParser getParser(Analyzer analyzer) {
+      return new SolrSynonymParser(true, true, analyzer) {
+        @Override
+        public void add(CharsRef input, CharsRef output, boolean includeOrig) {
+          super.add(lowercase(input), replaceNulls(output), countWords(input) > 1 ? true : false);
+        }
+        private int countWords(CharsRef chars) {
+          int wordCount = 1;
+          int upto = chars.offset;
+          final int limit = chars.offset + chars.length;
+          while(upto < limit) {
+            if (chars.chars[upto++] == SynonymMap.WORD_SEPARATOR) {
+              wordCount++;
+            }
+          }
+          return wordCount;
+        }
+        private CharsRef lowercase(CharsRef chars) {
+          chars = CharsRef.deepCopyOf(chars);
+          final int limit = chars.offset + chars.length;
+          for (int i=chars.offset;i<limit;i++) {
+            chars.chars[i] = Character.toLowerCase(chars.chars[i]); // NOTE: per-char lowercasing is not codepoint/locale-aware — confirm acceptable for test data
+          }
+          return chars;
+        }
+      };
+      
+    }
+  }
+  
+  public void testMultiWordSynonymsReplaceNullsCustomInclOrigAnalyzer() throws IOException {
+    String O = TypeAttribute.DEFAULT_TYPE;
+    String S = SynonymFilter.TYPE_SYNONYM;
+    
+    SynonymFilterFactory factory = new SynonymFilterFactory();
+    Map<String,String> args = new HashMap<String,String>();
+    args.put("synonyms", "synonyms.txt");
+    args.put("tokenizerFactory", "org.apache.lucene.analysis.core.KeywordTokenizerFactory");
+    args.put("builderFactory", TestParserReplaceNullsCustomInclOrigAnalyzer.class.getName());
+    
+    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    factory.init(args);
+    factory.inform(getSyn());
+    
+    
+    TokenStream ts = factory.create(new MockTokenizer(new StringReader("foo hubble space telescope"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "foo", "hubble", "hubble space telescope", "HST", "hs telescope", "space", "telescope" },
+        new int[]    {0, 4, 4, 4, 4,11,17}, //startOffset
+        new int[]    {3,10,26,26,26,16,26}, //endOffset
+        new String[] {O, O, S, S, S, O, O}, //type
+        new int[]    {1, 1, 0, 0, 0, 1, 1}  //posIncr
+    );
+    
+    // test ignoreCase=true
+    ts = factory.create(new MockTokenizer(new StringReader("hst"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "hubble space telescope", "HST", "hs telescope"},
+        new int[]    {0, 0, 0},
+        new int[]    {3, 3, 3},
+        new String[] {S, S, S},
+        new int[]    {1, 0, 0}
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foo bar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo", "foo bar", "foo ba", "fu ba", "foobar", "bar" },
+        new int[]    {0, 5, 5, 5, 5, 5, 9}, //startOffset
+        new int[]    {4, 8,12,12,12,12,12}, //endOffset
+        new String[] {O, O, S, S, S, S, O}, //type
+        new int[]    {1, 1, 0, 0, 0, 0, 1}  //posIncr
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foobar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo bar", "foo ba", "fu ba", "foobar"},
+        new int[]    {0, 5, 5, 5, 5, 5}, //startOffset
+        new int[]    {4,11,11,11,11,11}, //endOffset
+        new String[] {O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0, 1}  //posIncr
+    );
+    
+  }
+  
+  public void testMultiWordSynonymsReplaceNullsInclOrig() throws IOException {
+    String O = TypeAttribute.DEFAULT_TYPE;
+    String S = SynonymFilter.TYPE_SYNONYM;
+    
+    SynonymFilterFactory factory = new SynonymFilterFactory();
+    Map<String,String> args = new HashMap<String,String>();
+    args.put("synonyms", "synonyms.txt");
+    args.put("ignoreCase", "true");
+    args.put("tokenizerFactory", "org.apache.lucene.analysis.core.KeywordTokenizerFactory");
+    args.put("builderFactory", TestParserReplaceNullsInclOrig.class.getName());
+    
+    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    factory.init(args);
+    factory.inform(getSyn());
+    
+    
+    TokenStream ts = factory.create(new MockTokenizer(new StringReader("foo hubble space telescope"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "foo", "hubble", "hubble space telescope", "hst", "hs telescope", "space", "telescope" },
+        new int[]    {0, 4, 4, 4, 4,11,17}, //startOffset
+        new int[]    {3,10,26,26,26,16,26}, //endOffset
+        new String[] {O, O, S, S, S, O, O}, //type
+        new int[]    {1, 1, 0, 0, 0, 1, 1}  //posIncr
+    );
+    
+    
+    ts = factory.create(new MockTokenizer(new StringReader("hst"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "hst", "hubble space telescope", "hst", "hs telescope"},
+        new int[]    {0, 0, 0, 0},
+        new int[]    {3, 3, 3, 3},
+        new String[] {O, S, S, S},
+        new int[]    {1, 0, 0, 0}
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foo bar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo", "foo bar", "foo ba", "fu ba", "foobar", "bar" },
+        new int[]    {0, 5, 5, 5, 5, 5, 9}, //startOffset
+        new int[]    {4, 8,12,12,12,12,12}, //endOffset
+        new String[] {O, O, S, S, S, S, O}, //type
+        new int[]    {1, 1, 0, 0, 0, 0, 1}  //posIncr
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foobar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foobar", "foo bar", "foo ba", "fu ba", "foobar"},
+        new int[]    {0, 5, 5, 5, 5, 5, 5}, //startOffset
+        new int[]    {4,11,11,11,11,11,11}, //endOffset
+        new String[] {O, O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0, 0, 1}  //posIncr
+    );
+    
+  }
+  
+  
+  public void testMultiWordSynonymsNullReplaced() throws IOException {
+    String O = TypeAttribute.DEFAULT_TYPE;
+    String S = SynonymFilter.TYPE_SYNONYM;
+    
+    SynonymFilterFactory factory = new SynonymFilterFactory();
+    Map<String,String> args = new HashMap<String,String>();
+    args.put("synonyms", "synonyms.txt");
+    args.put("ignoreCase", "false");
+    args.put("tokenizerFactory", "org.apache.lucene.analysis.core.KeywordTokenizerFactory");
+    args.put("builderFactory", TestParserReplaceNulls.class.getName());
+    
+    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    factory.init(args);
+    factory.inform(getSyn());
+    
+    
+    TokenStream ts = factory.create(new MockTokenizer(new StringReader("foo hubble space telescope"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "foo", "hubble space telescope", "HST", "hs telescope" },
+        new int[]    {0, 4, 4, 4}, //startOffset
+        new int[]    {3,26,26,26}, //endOffset
+        new String[] {O, S, S, S}, //type
+        new int[]    {1, 1, 0, 0}  //posIncr
+    );
+    
+    
+    ts = factory.create(new MockTokenizer(new StringReader("HST"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "hubble space telescope", "HST", "hs telescope"},
+        new int[]    {0, 0, 0},
+        new int[]    {3, 3, 3},
+        new String[] {S, S, S},
+        new int[]    {1, 0, 0}
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foo bar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo bar", "foo ba", "fu ba", "foobar" },
+        new int[]    {0, 5, 5, 5, 5}, //startOffset
+        new int[]    {4,12,12,12,12}, //endOffset
+        new String[] {O, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0}  //posIncr
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foobar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo bar", "foo ba", "fu ba", "foobar"},
+        new int[]    {0, 5, 5, 5, 5, 5}, //startOffset
+        new int[]    {4,11,11,11,11,11}, //endOffset
+        new String[] {O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0, 1}  //posIncr
+    );
+    
+  }
+  
+  
+  
+  public void testMultiWordSynonymsDefault() throws IOException {
+    String O = TypeAttribute.DEFAULT_TYPE;
+    String S = SynonymFilter.TYPE_SYNONYM;
+    
+    SynonymFilterFactory factory = new SynonymFilterFactory();
+    Map<String,String> args = new HashMap<String,String>();
+    args.put("synonyms", "synonyms.txt");
+    args.put("tokenizerFactory", "org.apache.lucene.analysis.core.KeywordTokenizerFactory");
+    
+    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    factory.init(args);
+    factory.inform(getSyn());
+    
+    
+    TokenStream ts = factory.create(new MockTokenizer(new StringReader("foo hubble space telescope"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "foo", "hubble", "HST", "hs telescope", "space", "telescope" },
+        new int[]    {0, 4, 4, 4,11,17}, //startOffset
+        new int[]    {3,10,26,26,16,26}, //endOffset
+        new String[] {O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 1, 1}  //posIncr
+    );
+    
+    
+    ts = factory.create(new MockTokenizer(new StringReader("HST"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "hubble", "HST", "hs telescope", "space", "telescope" },
+        new int[]    {0, 0, 0, 0, 0},
+        new int[]    {3, 3, 3, 3, 3},
+        new String[] {S, S, S, S, S},
+        new int[]    {1, 0, 0, 1, 1}
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foo bar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo", "foo ba", "fu ba", "foobar", "bar" },
+        new int[]    {0, 5, 5, 5, 5, 9}, //startOffset
+        new int[]    {4, 8,12,12,12,12}, //endOffset
+        new String[] {O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0, 1}  //posIncr
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foobar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo", "foo ba", "fu ba", "foobar", "bar"},
+        new int[]    {0, 5, 5, 5, 5, 5}, //startOffset
+        new int[]    {4,11,11,11,11,11}, //endOffset
+        new String[] {O, S, S, S, S, S}, //type
+        new int[]    {1, 1, 0, 0, 0, 1}  //posIncr
+    );
+    
+  }
+  
+  /*
+   * The default behaviour but the original tokens are emitted
+   * before the synonyms
+   */
+  public void testMultiWordSynonymsInclOrig() throws IOException {
+    String O = TypeAttribute.DEFAULT_TYPE;
+    String S = SynonymFilter.TYPE_SYNONYM;
+    
+    SynonymFilterFactory factory = new SynonymFilterFactory();
+    Map<String,String> args = new HashMap<String,String>();
+    args.put("synonyms", "synonyms.txt");
+    args.put("ignoreCase", "true");
+    args.put("tokenizerFactory", "org.apache.lucene.analysis.core.KeywordTokenizerFactory");
+    args.put("builderFactory", TestParserInclOrig.class.getName());
+    
+    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    factory.init(args);
+    factory.inform(getSyn());
+    
+    
+    TokenStream ts = factory.create(new MockTokenizer(new StringReader("foo hubble space telescope"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "foo", "hubble", "hubble", "hst", "hs telescope", "space", "space", "telescope", "telescope" },
+    		new int[]    {0, 4, 4, 4, 4,11,11,17,17}, //startOffset
+    		new int[]    {3,10,10,26,26,16,16,26,26}, //endOffset
+    		new String[] {O, O, S, S, S, O, S, O, S}, //type
+    		new int[]    {1, 1, 0, 0, 0, 1, 0, 1, 0}  //posIncr
+    );
+    
+    
+    ts = factory.create(new MockTokenizer(new StringReader("hst"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "hst", "hubble", "hst", "hs telescope", "space", "telescope" },
+    		new int[]    {0, 0, 0, 0, 0, 0},
+    		new int[]    {3, 3, 3, 3, 3, 3},
+    		new String[] {O, S, S, S, S, S},
+    		new int[]    {1, 0, 0, 0, 1, 1}
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foo bar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foo", "foo", "foo ba", "fu ba", "foobar", "bar", "bar" },
+    		new int[]    {0, 5, 5, 5, 5, 5, 9, 9}, //startOffset
+    		new int[]    {4, 8, 8,12,12,12,12,12}, //endOffset
+    		new String[] {O, O, S, S, S, S, O, S}, //type
+    		new int[]    {1, 1, 0, 0, 0, 0, 1, 0}  //posIncr
+    );
+    
+    ts = factory.create(new MockTokenizer(new StringReader("some foobar"), MockTokenizer.WHITESPACE, false));
+    assertTokenStreamContents(ts, new String[] { "some", "foobar", "foo", "foo ba", "fu ba", "foobar", "bar"},
+    		new int[]    {0, 5, 5, 5, 5, 5, 5}, //startOffset
+    		new int[]    {4,11,11,11,11,11,11}, //endOffset
+    		new String[] {O, O, S, S, S, S, S}, //type
+    		new int[]    {1, 1, 0, 0, 0, 0, 1}  //posIncr
+    );
+    
+  }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java	(revision 1400800)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java	(working copy)
@@ -560,6 +560,7 @@
             nextRead = rollIncr(nextRead);
             inputSkipCount--;
           }
+          //System.out.println("  offset=(" + offsetAtt.startOffset() + "," +  offsetAtt.endOffset() + ") posIncr=" + posIncrAtt.getPositionIncrement() + " posLen=" + posLenAtt.getPositionLength());
           //System.out.println("  return token=" + termAtt.toString());
           return true;
         } else if (outputs.upto < outputs.count) {
@@ -584,6 +585,7 @@
             nextRead = rollIncr(nextRead);
             inputSkipCount--;
           }
+          //System.out.println("  offset=(" + offsetAtt.startOffset() + "," +  offsetAtt.endOffset() + ") posIncr=" + posIncrAtt.getPositionIncrement() + " posLen=" + posLenAtt.getPositionLength());
           //System.out.println("  return token=" + termAtt.toString());
           return true;
         } else {
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java	(revision 1400795)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java	(working copy)
@@ -54,7 +54,7 @@
  *  </ol>
  * @lucene.experimental
  */
-public class SolrSynonymParser extends SynonymMap.Builder {
+public class SolrSynonymParser extends SynonymFilterFactory.SynonymParser {
   private final boolean expand;
   private final Analyzer analyzer;
   
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java	(revision 1400795)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java	(working copy)
@@ -34,8 +34,6 @@
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.synonym.SynonymFilter;
 import org.apache.lucene.analysis.synonym.SynonymMap;
-import org.apache.lucene.analysis.synonym.SolrSynonymParser;
-import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
 import org.apache.lucene.analysis.util.*;
 import org.apache.lucene.util.Version;
 
@@ -51,115 +49,130 @@
  *   &lt;/analyzer&gt;
  * &lt;/fieldType&gt;</pre>
  */
-public class SynonymFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
+public class SynonymFilterFactory extends AbstractAnalysisFactory implements ResourceLoaderAware {
   private SynonymMap map;
   private boolean ignoreCase;
   
-  @Override
   public TokenStream create(TokenStream input) {
     // if the fst is null, it means there's actually no synonyms... just return the original stream
     // as there is nothing to do here.
     return map.fst == null ? input : new SynonymFilter(input, map, ignoreCase);
   }
-
-  @Override
+  
+  //@Override
   public void inform(ResourceLoader loader) throws IOException {
     final boolean ignoreCase = getBoolean("ignoreCase", false); 
     this.ignoreCase = ignoreCase;
 
-    String tf = args.get("tokenizerFactory");
+    String bf = args.get("builderFactory");
+    SynonymBuilderFactory builder = loadBuilderFactory(loader, bf != null ? bf : SynonymBuilderFactory.class.getName());
+    
+    try {
+      map = builder.create(loader);
+    } catch (ParseException e) {
+      throw new IOException(e);
+    }
+  }
+  
+  
+  public static class SynonymParser extends SynonymMap.Builder {
 
-    final TokenizerFactory factory = tf == null ? null : loadTokenizerFactory(loader, tf);
+    public SynonymParser(boolean dedup) {
+      super(dedup);
+    }
+
+    public void add(Reader in) throws IOException, ParseException {
+      throw new IllegalAccessError("You must override this method");
+    }
+  }
+  
+  
+  public static class SynonymBuilderFactory extends AbstractAnalysisFactory implements ResourceLoaderAware {
     
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_50, reader) : factory.create(reader);
-        TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_50, tokenizer) : tokenizer;
-        return new TokenStreamComponents(tokenizer, stream);
+    public SynonymMap create(ResourceLoader loader) throws IOException, ParseException {
+      
+      String synonyms = args.get("synonyms");
+      if (synonyms == null)
+        throw new IllegalArgumentException("Missing required argument 'synonyms'.");
+      
+      CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
+        .onMalformedInput(CodingErrorAction.REPORT)
+        .onUnmappableCharacter(CodingErrorAction.REPORT);
+      
+      SynonymParser parser = getParser(getAnalyzer(loader));
+      
+      File synonymFile = new File(synonyms);
+      if (synonymFile.exists()) {
+        decoder.reset();
+        parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
+      } else {
+        List<String> files = splitFileNames(synonyms);
+        for (String file : files) {
+          decoder.reset();
+          parser.add(new InputStreamReader(loader.openResource(file), decoder));
+        }
       }
-    };
+      return parser.build();
+      
+    }
+    
+    protected Analyzer getAnalyzer(ResourceLoader loader) throws IOException {
+      final boolean ignoreCase = getBoolean("ignoreCase", false); 
 
-    String format = args.get("format");
-    try {
+      String tf = args.get("tokenizerFactory");
+
+      final TokenizerFactory factory = tf == null ? null : loadTokenizerFactory(loader, tf);
+      
+      return new Analyzer() {
+        @Override
+        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+          Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_50, reader) : factory.create(reader);
+          TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_50, tokenizer) : tokenizer;
+          return new TokenStreamComponents(tokenizer, stream);
+        }
+      };
+    }
+    
+    protected SynonymParser getParser(Analyzer analyzer) {
+      
+      String format = args.get("format");
+      boolean expand = getBoolean("expand", true);
+      
       if (format == null || format.equals("solr")) {
         // TODO: expose dedup as a parameter?
-        map = loadSolrSynonyms(loader, true, analyzer);
+        return new SolrSynonymParser(true, expand, analyzer);
       } else if (format.equals("wordnet")) {
-        map = loadWordnetSynonyms(loader, true, analyzer);
+        return new WordnetSynonymParser(true, expand, analyzer);
       } else {
         // TODO: somehow make this more pluggable
         throw new IllegalArgumentException("Unrecognized synonyms format: " + format);
       }
-    } catch (ParseException e) {
-      throw new IOException("Error parsing synonyms file:", e);
     }
-  }
-  
-  /**
-   * Load synonyms from the solr format, "format=solr".
-   */
-  private SynonymMap loadSolrSynonyms(ResourceLoader loader, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
-    final boolean expand = getBoolean("expand", true);
-    String synonyms = args.get("synonyms");
-    if (synonyms == null)
-      throw new IllegalArgumentException("Missing required argument 'synonyms'.");
     
-    CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
-      .onMalformedInput(CodingErrorAction.REPORT)
-      .onUnmappableCharacter(CodingErrorAction.REPORT);
     
-    SolrSynonymParser parser = new SolrSynonymParser(dedup, expand, analyzer);
-    File synonymFile = new File(synonyms);
-    if (synonymFile.exists()) {
-      decoder.reset();
-      parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
-    } else {
-      List<String> files = splitFileNames(synonyms);
-      for (String file : files) {
-        decoder.reset();
-        parser.add(new InputStreamReader(loader.openResource(file), decoder));
+    // (there are no tests for this functionality)
+    private TokenizerFactory loadTokenizerFactory(ResourceLoader loader, String cname) throws IOException {
+      TokenizerFactory tokFactory = loader.newInstance(cname, TokenizerFactory.class);
+      tokFactory.setLuceneMatchVersion(luceneMatchVersion);
+      tokFactory.init(args);
+      if (tokFactory instanceof ResourceLoaderAware) {
+        ((ResourceLoaderAware) tokFactory).inform(loader);
       }
+      return tokFactory;
     }
-    return parser.build();
-  }
-  
-  /**
-   * Load synonyms from the wordnet format, "format=wordnet".
-   */
-  private SynonymMap loadWordnetSynonyms(ResourceLoader loader, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
-    final boolean expand = getBoolean("expand", true);
-    String synonyms = args.get("synonyms");
-    if (synonyms == null)
-      throw new IllegalArgumentException("Missing required argument 'synonyms'.");
-    
-    CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
-      .onMalformedInput(CodingErrorAction.REPORT)
-      .onUnmappableCharacter(CodingErrorAction.REPORT);
-    
-    WordnetSynonymParser parser = new WordnetSynonymParser(dedup, expand, analyzer);
-    File synonymFile = new File(synonyms);
-    if (synonymFile.exists()) {
-      decoder.reset();
-      parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
-    } else {
-      List<String> files = splitFileNames(synonyms);
-      for (String file : files) {
-        decoder.reset();
-        parser.add(new InputStreamReader(loader.openResource(file), decoder));
-      }
+
+    public void inform(ResourceLoader loader) throws IOException {
+      // do nothing
     }
-    return parser.build();
   }
   
-  // (there are no tests for this functionality)
-  private TokenizerFactory loadTokenizerFactory(ResourceLoader loader, String cname) throws IOException {
-    TokenizerFactory tokFactory = loader.newInstance(cname, TokenizerFactory.class);
-    tokFactory.setLuceneMatchVersion(luceneMatchVersion);
-    tokFactory.init(args);
-    if (tokFactory instanceof ResourceLoaderAware) {
-      ((ResourceLoaderAware) tokFactory).inform(loader);
+  private SynonymBuilderFactory loadBuilderFactory(ResourceLoader loader, String cname) throws IOException {
+    SynonymBuilderFactory builderFactory = loader.newInstance(cname, SynonymBuilderFactory.class);
+    builderFactory.setLuceneMatchVersion(luceneMatchVersion);
+    builderFactory.init(args);
+    if (builderFactory instanceof ResourceLoaderAware) {
+      ((ResourceLoaderAware) builderFactory).inform(loader);
     }
-    return tokFactory;
+    return builderFactory;
   }
 }
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java
===================================================================
--- lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java	(revision 1400795)
+++ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/WordnetSynonymParser.java	(working copy)
@@ -32,7 +32,7 @@
  * @lucene.experimental
  */
 // TODO: allow you to specify syntactic categories (e.g. just nouns, etc)
-public class WordnetSynonymParser extends SynonymMap.Builder {
+public class WordnetSynonymParser extends SynonymFilterFactory.SynonymParser {
   private final boolean expand;
   private final Analyzer analyzer;
   
