Index: lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/InfixSuggesterTest.java
===================================================================
--- lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/InfixSuggesterTest.java	(revision 0)
+++ lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/InfixSuggesterTest.java	(working copy)
@@ -0,0 +1,68 @@
+package org.apache.lucene.search.suggest.analyzing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.search.suggest.TermFreq;
+import org.apache.lucene.search.suggest.TermFreqArrayIterator;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class InfixSuggesterTest extends LuceneTestCase {
+  /**
+   * Basic whitespace-analyzer test with English stopword removal: stopwords may be omitted from the query, and an infix (mid-phrase) query still finds the suggestion.
+   */
+  public void testStandard() throws Exception {
+    TermFreq keys[] = new TermFreq[] {
+        new TermFreq("the ghost of christmas past", 50),
+    };
+    
+    Analyzer standard = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false);
+    InfixingSuggester suggester = new InfixingSuggester(standard);
+    suggester.build(new TermFreqArrayIterator(keys));
+    
+    List<LookupResult> results = suggester.lookup(_TestUtil.stringToCharSequence("the ghost of chris", random()), false, 1);
+    assertEquals(1, results.size());
+    assertEquals("the ghost of christmas past", results.get(0).key.toString());
+    assertEquals(50, results.get(0).value, 0.01F);
+
+    // omit the leading 'the' (a stopword): the full phrase is still suggested
+    results = suggester.lookup(_TestUtil.stringToCharSequence("ghost of chris", random()), false, 1);
+    assertEquals(1, results.size());
+    assertEquals("the ghost of christmas past", results.get(0).key.toString());
+    assertEquals(50, results.get(0).value, 0.01F);
+
+    // omit 'the' and 'of' (both stopwords): the full phrase is still suggested
+    results = suggester.lookup(_TestUtil.stringToCharSequence("ghost chris", random()), false, 1);
+    assertEquals(1, results.size());
+    assertEquals("the ghost of christmas past", results.get(0).key.toString());
+    assertEquals(50, results.get(0).value, 0.01F);
+    
+    // infix lookup: 'chris' starts mid-phrase, after several tokens
+    results = suggester.lookup(_TestUtil.stringToCharSequence("chris", random()), false, 1);
+    assertEquals(1, results.size());
+    assertEquals("the ghost of christmas past", results.get(0).key.toString());
+    assertEquals(50, results.get(0).value, 0.01F);
+  }
+}

Property changes on: lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/InfixSuggesterTest.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java
===================================================================
--- lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java	(revision 1457993)
+++ lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FuzzySuggester.java	(working copy)
@@ -67,7 +67,7 @@
  * lookup performance. At index time, complex analyzers can safely be used.
  * </p>
  */
-public final class FuzzySuggester extends AnalyzingSuggester {
+public class FuzzySuggester extends AnalyzingSuggester {
   private final int maxEdits;
   private final boolean transpositions;
   private final int nonFuzzyPrefix;
Index: lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/InfixingSuggester.java
===================================================================
--- lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/InfixingSuggester.java	(revision 0)
+++ lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/InfixingSuggester.java	(working copy)
@@ -0,0 +1,105 @@
+package org.apache.lucene.search.suggest.analyzing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStreamToAutomaton;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.State;
+import org.apache.lucene.util.automaton.Transition;
+
+public class InfixingSuggester extends FuzzySuggester {
+  // FuzzySuggester variant that, at index time, copies each post-separator state's transitions onto the initial state, so lookups can also match starting at any later token boundary (infix matching).
+  // NOTE(review): these ctors should also take a 'maxInfix' depth to bound the infixing
+
+  public InfixingSuggester(Analyzer analyzer) {
+    super(analyzer);
+  }
+
+  public InfixingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer) {
+    super(indexAnalyzer, queryAnalyzer);
+  }
+  
+  public InfixingSuggester(Analyzer indexAnalyzer, Analyzer queryAnalyzer,
+      int options, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+      int maxEdits, boolean transpositions, int nonFuzzyPrefix,
+      int minFuzzyLength) {
+    super(indexAnalyzer, queryAnalyzer, options, maxSurfaceFormsPerAnalyzedForm,
+        maxGraphExpansions, maxEdits, transpositions, nonFuzzyPrefix,
+        minFuzzyLength);
+  }
+
+  // NOTE(author): empty-string acceptance (via the epsilon-like transitions added
+  // back onto the initial state) is not dealt with here -- TODO confirm impact
+  @Override
+  protected void replaceSep(Automaton a, boolean isQuery) {
+    if (!isQuery) {
+      State initial = a.getInitialState();
+      List<Transition> newInitialTransitions = new ArrayList<Transition>();
+      Transition[] initialTransitions = initial.transitionsArray;
+      for (int i = 0; i < initial.numTransitions; i++) {
+        newInitialTransitions.add(initialTransitions[i]);
+      }
+      State[] states = a.getNumberedStates();
+
+      // Go in reverse topo sort so we know we only have to
+      // make one pass:
+      for(int stateNumber=states.length-1;stateNumber >=0;stateNumber--) {
+        final State state = states[stateNumber];
+        if (state == initial) {
+          continue;
+        }
+        List<Transition> newTransitions = new ArrayList<Transition>();
+        for(Transition t : state.getTransitions()) {
+          assert t.getMin() == t.getMax();
+          if (t.getMin() == TokenStreamToAutomaton.POS_SEP) {
+            if (preserveSep) {
+              // Remap to SEP_LABEL:
+              newTransitions.add(new Transition(SEP_LABEL, t.getDest()));
+            } else {
+              copyDestTransitions(state, t.getDest(), newTransitions);
+            }
+            copyDestTransitions(initial, t.getDest(), newInitialTransitions);
+            a.setDeterministic(false);
+          } else if (t.getMin() == TokenStreamToAutomaton.HOLE) {
+
+            // Just remove the hole: there will then be two
+            // SEP tokens next to each other, which will only
+            // match another hole at search time.  Note that
+            // it will also match an empty-string token ... if
+            // that's somehow a problem we can always map HOLE
+            // to a dedicated byte (and escape it in the
+            // input).
+            copyDestTransitions(state, t.getDest(), newTransitions);
+            copyDestTransitions(initial, t.getDest(), newInitialTransitions);
+            a.setDeterministic(false);
+          } else {
+            newTransitions.add(t);
+          }
+        }
+        state.setTransitions(newTransitions.toArray(new Transition[newTransitions.size()]));
+      }
+      initial.setTransitions(newInitialTransitions.toArray(new Transition[newInitialTransitions.size()]));
+    } else {
+      super.replaceSep(a, true);
+    }
+  }
+}

Property changes on: lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/InfixingSuggester.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
===================================================================
--- lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java	(revision 1457993)
+++ lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java	(working copy)
@@ -144,7 +144,7 @@
   /** 
    * True if separator between tokens should be preserved.
    */
-  private final boolean preserveSep;
+  final boolean preserveSep;
 
   /** Include this flag in the options parameter to {@link
    *  #AnalyzingSuggester(Analyzer,Analyzer,int,int,int)} to always
@@ -160,7 +160,7 @@
 
   /** Represents the separation between tokens, if
    *  PRESERVE_SEP was specified */
-  private static final int SEP_LABEL = 0xff;
+  static final int SEP_LABEL = 0xff;
 
   /** Marks end of the analyzed input and start of dedup
    *  byte. */
@@ -248,7 +248,7 @@
     return fst == null ? 0 : fst.sizeInBytes();
   }
 
-  private void copyDestTransitions(State from, State to, List<Transition> transitions) {
+  void copyDestTransitions(State from, State to, List<Transition> transitions) {
     if (to.isAccept()) {
       from.setAccept(true);
     }
@@ -257,9 +257,11 @@
     }
   }
 
-  // Replaces SEP with epsilon or remaps them if
-  // we were asked to preserve them:
-  private void replaceSep(Automaton a) {
+  /** Replaces SEP with epsilon, or remaps it to SEP_LABEL when
+   * we were asked to preserve separators; subclasses may override
+   * this to rewrite the automaton further (isQuery distinguishes
+   * query-time from index-time automata). */
+  void replaceSep(Automaton a, boolean isQuery) {
 
     State[] states = a.getNumberedStates();
 
@@ -851,7 +853,7 @@
     Automaton automaton = ts2a.toAutomaton(ts);
     ts.close();
 
-    replaceSep(automaton);
+    replaceSep(automaton, false);
 
     assert SpecialOperations.isFinite(automaton);
 
@@ -878,7 +880,7 @@
     // This way we could eg differentiate "net" from "net ",
     // which we can't today...
 
-    replaceSep(automaton);
+    replaceSep(automaton, true);
 
     // TODO: we can optimize this somewhat by determinizing
     // while we convert
