diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
index e0669f2..db3ec9e 100644
--- oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
@@ -159,6 +159,8 @@ class IndexDefinition implements Aggregate.AggregateMapper{
 
     private final Analyzer analyzer;
 
+    private final Map<String, Analyzer> analyzers;
+
     public IndexDefinition(NodeState root, NodeState defn) {
         this(root, defn, null);
     }
@@ -212,6 +214,7 @@ class IndexDefinition implements Aggregate.AggregateMapper{
         this.costPerEntry = getOptionalValue(defn, LuceneIndexConstants.COST_PER_ENTRY, 1.0);
         this.costPerExecution = getOptionalValue(defn, LuceneIndexConstants.COST_PER_EXECUTION, 1.0);
         this.indexesAllTypes = areAllTypesIndexed();
+        this.analyzers = collectAnalyzers(defn);
         this.analyzer = createAnalyzer();
     }
 
@@ -301,14 +304,28 @@ class IndexDefinition implements Aggregate.AggregateMapper{
     //~---------------------------------------------------< Analyzer >
 
     private Analyzer createAnalyzer() {
+        Analyzer defaultAnalyzer = LuceneIndexConstants.ANALYZER;
+        if (analyzers.containsKey(LuceneIndexConstants.ANL_DEFAULT)){
+            defaultAnalyzer = analyzers.get(LuceneIndexConstants.ANL_DEFAULT);
+        }
         if (!evaluatePathRestrictions()){
-            return LuceneIndexConstants.ANALYZER;
+            return defaultAnalyzer;
         }
         Map<String, Analyzer> analyzerMap = ImmutableMap.<String, Analyzer>builder()
                 .put(FieldNames.ANCESTORS,
                         new TokenizerChain(new PathHierarchyTokenizerFactory(Collections.<String, String>emptyMap())))
                 .build();
-        return new PerFieldAnalyzerWrapper(LuceneIndexConstants.ANALYZER, analyzerMap);
+        return new PerFieldAnalyzerWrapper(defaultAnalyzer, analyzerMap);
+    }
+
+    private static Map<String, Analyzer> collectAnalyzers(NodeState defn) {
+        Map<String, Analyzer> analyzerMap = newHashMap();
+        NodeStateAnalyzerFactory factory = new NodeStateAnalyzerFactory(LuceneIndexConstants.VERSION);
+        for (ChildNodeEntry cne : defn.getChildNode(LuceneIndexConstants.ANALYZERS).getChildNodeEntries()) {
+            Analyzer a = factory.createInstance(cne.getNodeState());
+            analyzerMap.put(cne.getName(), a);
+        }
+        return ImmutableMap.copyOf(analyzerMap);
     }
 
     //~---------------------------------------------------< Aggregates >
diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
index 0abfd73..61a3a14 100644
--- oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
@@ -17,6 +17,7 @@
 package org.apache.jackrabbit.oak.plugins.index.lucene;
 
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
 import org.apache.lucene.util.Version;
 
 public interface LuceneIndexConstants {
@@ -182,4 +183,21 @@ public interface LuceneIndexConstants {
     String COST_PER_ENTRY = "costPerEntry";
 
     String COST_PER_EXECUTION = "costPerExecution";
+
+    /**
+     * Node name under which various analyzers are configured
+     */
+    String ANALYZERS = "analyzers";
+
+    /**
+     * Name of the default analyzer definition node under 'analyzers' node
+     */
+    String ANL_DEFAULT = "default";
+    String ANL_FILTERS = "filters";
+    String ANL_STOPWORDS = "stopwords";
+    String ANL_TOKENIZER = "tokenizer";
+    String ANL_CHAR_FILTERS = "charFilters";
+    String ANL_CLASS = "class";
+    String ANL_NAME = "name";
+    String ANL_LUCENE_MATCH_VERSION = AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM;
 }
diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
index 5ff90e8..4874e22 100644
--- oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
@@ -44,6 +44,9 @@ import org.apache.jackrabbit.oak.spi.query.QueryIndexProvider;
 import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
 import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
 import org.apache.jackrabbit.oak.spi.whiteboard.WhiteboardExecutor;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
 import org.apache.lucene.util.InfoStream;
 import org.osgi.framework.BundleContext;
 import org.osgi.framework.ServiceRegistration;
@@ -99,6 +102,7 @@ public class LuceneIndexProviderService {
     @Activate
     private void activate(BundleContext bundleContext, Map<String, ?> config)
             throws NotCompliantMBeanException {
+        initializeFactoryClassLoaders(getClass().getClassLoader());
         whiteboard = new OsgiWhiteboard(bundleContext);
 
         indexProvider = new LuceneIndexProvider(createTracker(bundleContext, config));
@@ -190,6 +194,16 @@ public class LuceneIndexProviderService {
         return new IndexTracker();
     }
 
+    private void initializeFactoryClassLoaders(ClassLoader classLoader) {
+    //Factories use the Thread's context classloader to perform SPI class
+    //lookup by default, which would not work in the OSGi world. So reload the
+        //factories by providing the bundle classloader
+        TokenizerFactory.reloadTokenizers(classLoader);
+        CharFilterFactory.reloadCharFilters(classLoader);
+        TokenFilterFactory.reloadTokenFilters(classLoader);
+    }
+
+
     protected void bindNodeAggregator(NodeAggregator aggregator) {
         this.nodeAggregator = aggregator;
         initialize();
diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
new file mode 100644
index 0000000..0e6c1b0
--- /dev/null
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
@@ -0,0 +1,284 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.index.lucene;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.lang.reflect.Constructor;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.lucene.util.TokenizerChain;
+import org.apache.jackrabbit.oak.plugins.tree.ImmutableTree;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.ClasspathResourceLoader;
+import org.apache.lucene.analysis.util.ResourceLoader;
+import org.apache.lucene.analysis.util.ResourceLoaderAware;
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
+import org.apache.lucene.analysis.util.WordlistLoader;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Constructs the TokenizerChain based on NodeState content. The approach taken is similar
+ * to the one taken in org.apache.solr.schema.FieldTypePluginLoader, which is implemented for
+ * XML-based config. Resource lookups are performed via binary property access
+ */
+final class NodeStateAnalyzerFactory{
+    private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false);
+
+    private static final Set<String> IGNORE_PROP_NAMES = ImmutableSet.of(LuceneIndexConstants.ANL_CLASS, LuceneIndexConstants.ANL_NAME);
+
+    private static final Logger log = LoggerFactory.getLogger(NodeStateAnalyzerFactory.class);
+
+    private final ResourceLoader defaultLoader;
+    private final Version defaultVersion;
+
+    public NodeStateAnalyzerFactory(Version defaultVersion){
+        this(new ClasspathResourceLoader(NodeStateAnalyzerFactory.class.getClassLoader()), defaultVersion);
+    }
+
+    public NodeStateAnalyzerFactory(ResourceLoader defaultLoader, Version defaultVersion) {
+        this.defaultLoader = defaultLoader;
+        this.defaultVersion = defaultVersion;
+    }
+
+    public Analyzer createInstance(NodeState state) {
+        if (state.hasProperty(LuceneIndexConstants.ANL_CLASS)){
+            return createAnalyzerViaReflection(state);
+        }
+        return composeAnalyzer(state);
+    }
+
+    private Analyzer composeAnalyzer(NodeState state) {
+        TokenizerFactory tf = loadTokenizer(state.getChildNode(LuceneIndexConstants.ANL_TOKENIZER));
+        CharFilterFactory[] cfs = loadCharFilterFactories(state.getChildNode(LuceneIndexConstants.ANL_CHAR_FILTERS));
+        TokenFilterFactory[] tffs = loadTokenFilterFactories(state.getChildNode(LuceneIndexConstants.ANL_FILTERS));
+        return new TokenizerChain(cfs, tf, tffs);
+    }
+
+    private TokenFilterFactory[] loadTokenFilterFactories(NodeState tokenFiltersState) {
+        List<TokenFilterFactory> result = newArrayList();
+
+        ImmutableTree tree = new ImmutableTree(tokenFiltersState);
+        for (Tree t : tree.getChildren()){
+            NodeState state = tokenFiltersState.getChildNode(t.getName());
+
+            String factoryType = getFactoryType(state, t.getName());
+            Map<String, String> args = convertNodeState(state);
+            TokenFilterFactory cf = TokenFilterFactory.forName(factoryType, args);
+            init(cf, state);
+            result.add(cf);
+        }
+
+        return result.toArray(new TokenFilterFactory[result.size()]);
+    }
+
+    private CharFilterFactory[] loadCharFilterFactories(NodeState charFiltersState) {
+        List<CharFilterFactory> result = newArrayList();
+
+        //Need to read children in order
+        ImmutableTree tree = new ImmutableTree(charFiltersState);
+        for (Tree t : tree.getChildren()){
+            NodeState state = charFiltersState.getChildNode(t.getName());
+
+            String factoryType = getFactoryType(state, t.getName());
+            Map<String, String> args = convertNodeState(state);
+            CharFilterFactory cf = CharFilterFactory.forName(factoryType, args);
+            init(cf, state);
+            result.add(cf);
+        }
+
+        return result.toArray(new CharFilterFactory[result.size()]);
+    }
+
+    private TokenizerFactory loadTokenizer(NodeState state) {
+        String clazz = checkNotNull(state.getString(LuceneIndexConstants.ANL_NAME));
+        Map<String, String> args = convertNodeState(state);
+        TokenizerFactory tf = TokenizerFactory.forName(clazz, args);
+        init(tf, state);
+        return tf;
+    }
+
+    private Analyzer createAnalyzerViaReflection(NodeState state) {
+        String clazz = state.getString(LuceneIndexConstants.ANL_CLASS);
+        Class<? extends Analyzer> analyzerClazz = defaultLoader.findClass(clazz, Analyzer.class);
+
+        Version matchVersion = getVersion(state);
+        CharArraySet stopwords = null;
+        if (StopwordAnalyzerBase.class.isAssignableFrom(analyzerClazz)
+                && state.hasChildNode(LuceneIndexConstants.ANL_STOPWORDS)) {
+            try {
+                stopwords = loadStopwordSet(state.getChildNode(LuceneIndexConstants.ANL_STOPWORDS), LuceneIndexConstants.ANL_STOPWORDS, matchVersion);
+            } catch (IOException e) {
+                throw new RuntimeException("Error occurred while loading stopwords", e);
+            }
+        }
+        Constructor<? extends Analyzer> c = null;
+
+        try {
+            if (stopwords != null) {
+                c = analyzerClazz.getConstructor(Version.class, CharArraySet.class);
+                return c.newInstance(matchVersion, stopwords);
+            } else {
+                c = analyzerClazz.getConstructor(Version.class);
+                return c.newInstance(matchVersion);
+            }
+        } catch (ReflectiveOperationException e) {
+            throw new RuntimeException("Error occurred while instantiating Analyzer for " + analyzerClazz, e);
+        }
+    }
+
+    private void init(AbstractAnalysisFactory o, NodeState state) {
+        if (o instanceof ResourceLoaderAware) {
+            try {
+                ((ResourceLoaderAware) o).inform(new NodeStateResourceLoader(state, defaultLoader));
+            } catch (IOException e) {
+                throw new IllegalArgumentException("Error occurred while initializing type " + o.getClass(), e);
+            }
+        }
+
+        if (state.hasProperty(LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION)){
+            o.setExplicitLuceneMatchVersion(true);
+        }
+    }
+
+    Map<String, String> convertNodeState(NodeState state) {
+        Map<String, String> result = Maps.newHashMap();
+        for (PropertyState ps : state.getProperties()) {
+            String name = ps.getName();
+            if (ps.getType() != Type.BINARY
+                    && !ps.isArray()
+                    && !IGNORE_PROP_NAMES.contains(name)) {
+                result.put(name, ps.getValue(Type.STRING));
+            }
+        }
+        result.put(LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION, getVersion(state).toString());
+        return result;
+    }
+
+    private Version getVersion(NodeState state){
+        Version version = defaultVersion;
+        if (state.hasProperty(LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION)){
+            version = parseLuceneVersionString(state.getString(LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION));
+        }
+        return version;
+    }
+
+    private static String getFactoryType(NodeState state, String nodeStateName){
+        String type = state.getString(LuceneIndexConstants.ANL_NAME);
+        return type != null ? type : nodeStateName;
+    }
+
+    @SuppressWarnings("deprecation")
+    private static Version parseLuceneVersionString(final String matchVersion) {
+        final Version version = Version.parseLeniently(matchVersion);
+        if (version == Version.LUCENE_CURRENT && !versionWarningAlreadyLogged.getAndSet(true)) {
+            log.warn(
+                    "You should not use LATEST as luceneMatchVersion property: "+
+                            "if you use this setting, and then Solr upgrades to a newer release of Lucene, "+
+                            "sizable changes may happen. If precise back compatibility is important "+
+                            "then you should instead explicitly specify an actual Lucene version."
+            );
+        }
+        return version;
+    }
+
+    /**
+     * Assumes that the given state is of type nt:file and then reads
+     * the jcr:content/@jcr:data property to get the binary content
+     */
+    private static Blob getBlob(NodeState state, String resourceName){
+        NodeState contentNode = state.getChildNode(JcrConstants.JCR_CONTENT);
+        checkArgument(contentNode.exists(), "Was expecting to find jcr:content node to read resource %s", resourceName);
+        return contentNode.getProperty(JcrConstants.JCR_DATA).getValue(Type.BINARY);
+    }
+
+    private static CharArraySet loadStopwordSet(NodeState file, String name,
+                                                  Version matchVersion) throws IOException {
+        Blob blob = getBlob(file, name);
+        Reader stopwords = new InputStreamReader(blob.getNewStream(), IOUtils.CHARSET_UTF_8);
+        try {
+            return WordlistLoader.getWordSet(stopwords, matchVersion);
+        } finally {
+            IOUtils.close(stopwords);
+        }
+    }
+
+    static class NodeStateResourceLoader implements ResourceLoader {
+        private final NodeState state;
+        private final ResourceLoader delegate;
+
+        public NodeStateResourceLoader(NodeState state, ResourceLoader delegate) {
+            this.state = state;
+            this.delegate = delegate;
+        }
+
+        @Override
+        public InputStream openResource(String resource) throws IOException {
+            if (state.hasChildNode(resource)){
+                return getBlob(state.getChildNode(resource), resource).getNewStream();
+            }
+            return delegate.openResource(resource);
+        }
+
+        @Override
+        public <T> Class<? extends T> findClass(String cname, Class<T> expectedType) {
+            //For factories the cname is not a FQN. Instead it's the name without the suffix.
+            //E.g. for WhitespaceTokenizerFactory it's 'whitespace'
+            if (CharFilterFactory.class.isAssignableFrom(expectedType)) {
+                return CharFilterFactory.lookupClass(cname).asSubclass(expectedType);
+            } else if (TokenizerFactory.class.isAssignableFrom(expectedType)) {
+                return TokenizerFactory.lookupClass(cname).asSubclass(expectedType);
+            } else if (TokenFilterFactory.class.isAssignableFrom(expectedType)) {
+                return TokenFilterFactory.lookupClass(cname).asSubclass(expectedType);
+            }
+            return delegate.findClass(cname, expectedType);
+        }
+
+        @Override
+        public <T> T newInstance(String cname, Class<T> expectedType) {
+            throw new UnsupportedOperationException();
+        }
+    }
+}
diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/TokenizerChain.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/TokenizerChain.java
index c49cc86..e3dffeb 100644
--- oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/TokenizerChain.java
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/TokenizerChain.java
@@ -20,6 +20,7 @@
 package org.apache.jackrabbit.oak.plugins.index.lucene.util;
 
 import java.io.Reader;
+import java.util.Arrays;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
@@ -63,6 +64,20 @@ public final class TokenizerChain extends Analyzer {
         return reader;
     }
 
+    //Mostly required for testing purposes
+
+    public CharFilterFactory[] getCharFilters() {
+        return Arrays.copyOf(charFilters, charFilters.length);
+    }
+
+    public TokenizerFactory getTokenizer() {
+        return tokenizer;
+    }
+
+    public TokenFilterFactory[] getFilters() {
+        return Arrays.copyOf(filters, filters.length);
+    }
+
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tk = tokenizer.create(reader);
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinitionTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinitionTest.java
index 799b247..b56b135 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinitionTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinitionTest.java
@@ -26,6 +26,7 @@ import org.apache.jackrabbit.JcrConstants;
 import org.apache.jackrabbit.oak.api.Tree;
 import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.plugins.index.IndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.lucene.util.TokenizerChain;
 import org.apache.jackrabbit.oak.plugins.tree.ImmutableTree;
 import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
 import org.apache.jackrabbit.oak.spi.state.NodeState;
@@ -41,6 +42,8 @@ import static org.apache.jackrabbit.JcrConstants.NT_BASE;
 import static org.apache.jackrabbit.oak.api.Type.NAMES;
 import static org.apache.jackrabbit.oak.api.Type.STRINGS;
 import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.INDEX_DEFINITIONS_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANALYZERS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_DEFAULT;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INCLUDE_PROPERTY_NAMES;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INCLUDE_PROPERTY_TYPES;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_DATA_CHILD_NAME;
@@ -445,6 +448,17 @@ public class IndexDefinitionTest {
         assertEquals(50, defn.getFulltextEntryCount(50));
     }
 
+    @Test
+    public void customAnalyzer() throws Exception{
+        NodeBuilder defnb = newLuceneIndexDefinition(builder.child(INDEX_DEFINITIONS_NAME),
+                "lucene", of(TYPENAME_STRING));
+        defnb.child(ANALYZERS).child(ANL_DEFAULT)
+                .child(LuceneIndexConstants.ANL_TOKENIZER)
+                .setProperty(LuceneIndexConstants.ANL_NAME, "whitespace");
+        IndexDefinition defn = new IndexDefinition(root, defnb.getNodeState());
+        assertEquals(TokenizerChain.class.getName(), defn.getAnalyzer().getClass().getName());
+    }
+
     private static IndexingRule getRule(IndexDefinition defn, String typeName){
         return defn.getApplicableIndexingRule(newTree(newNode(typeName)));
     }
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
index 93ee6bb..cee4ca0 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexTest.java
@@ -26,10 +26,12 @@ import javax.annotation.Nullable;
 import javax.jcr.PropertyType;
 
 import static com.google.common.collect.ImmutableList.copyOf;
+import static com.google.common.collect.ImmutableSet.of;
 import static com.google.common.collect.Iterators.transform;
 import static com.google.common.collect.Lists.newArrayList;
 import static com.google.common.collect.Sets.newHashSet;
 import static com.google.common.util.concurrent.MoreExecutors.sameThreadExecutor;
+import static javax.jcr.PropertyType.TYPENAME_STRING;
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertTrue;
@@ -37,10 +39,16 @@ import static org.apache.jackrabbit.JcrConstants.JCR_SYSTEM;
 import static org.apache.jackrabbit.JcrConstants.NT_BASE;
 import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.INDEX_DEFINITIONS_NAME;
 import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.REINDEX_PROPERTY_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANALYZERS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_DEFAULT;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_FILTERS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_TOKENIZER;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_RULES;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.PERSISTENCE_FILE;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.PERSISTENCE_NAME;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.PERSISTENCE_PATH;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneIndexHelper.newLuceneIndexDefinition;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.util.LuceneIndexHelper.newLucenePropertyIndexDefinition;
 import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_NODE_TYPES;
 import static org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent.INITIAL_CONTENT;
@@ -50,11 +58,13 @@ import static org.apache.jackrabbit.oak.spi.query.QueryIndex.IndexPlan;
 import com.google.common.base.Function;
 import org.apache.commons.io.FileUtils;
 import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.IndexConstants;
 import org.apache.jackrabbit.oak.plugins.index.IndexUpdateProvider;
 import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
 import org.apache.jackrabbit.oak.query.QueryEngineSettings;
 import org.apache.jackrabbit.oak.query.ast.Operator;
 import org.apache.jackrabbit.oak.query.ast.SelectorImpl;
+import org.apache.jackrabbit.oak.query.fulltext.FullTextTerm;
 import org.apache.jackrabbit.oak.query.index.FilterImpl;
 import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
 import org.apache.jackrabbit.oak.spi.commit.EditorHook;
@@ -261,6 +271,52 @@ public class LuceneIndexTest {
     }
 
     @Test
+    public void analyzerWithStopWords() throws Exception{
+        NodeBuilder nb = newLuceneIndexDefinition(builder.child(INDEX_DEFINITIONS_NAME), "lucene",
+                of(TYPENAME_STRING));
+        TestUtil.useV2(nb);
+        NodeState before = builder.getNodeState();
+        builder.setProperty("foo", "fox jumping");
+        NodeState after = builder.getNodeState();
+
+        NodeState indexed = HOOK.processCommit(before, after,CommitInfo.EMPTY);
+
+        IndexTracker tracker = new IndexTracker();
+        tracker.update(indexed);
+        AdvancedQueryIndex queryIndex = new LucenePropertyIndex(tracker);
+
+        FilterImpl filter = createFilter("nt:base");
+
+        filter.setFullTextConstraint(new FullTextTerm(null, "fox jumping", false, false, null));
+        assertFilter(filter, queryIndex, indexed, ImmutableList.of("/"));
+
+        //No stop word configured so default analyzer would also check for 'was'
+        filter.setFullTextConstraint(new FullTextTerm(null, "fox was jumping", false, false, null));
+        assertFilter(filter, queryIndex, indexed, Collections.<String>emptyList());
+
+        //Change the default analyzer to use the default stopword set
+        //and trigger a reindex such that new analyzer is used
+        NodeBuilder anlnb = nb.child(ANALYZERS).child(ANL_DEFAULT);
+        anlnb.child(ANL_TOKENIZER).setProperty(ANL_NAME, "whitespace");
+        anlnb.child(ANL_FILTERS).child("stop");
+        nb.setProperty(IndexConstants.REINDEX_PROPERTY_NAME, true);
+
+        before = after;
+        after = builder.getNodeState();
+
+        indexed = HOOK.processCommit(before, after,CommitInfo.EMPTY);
+        tracker.update(indexed);
+        queryIndex = new LucenePropertyIndex(tracker);
+
+        filter.setFullTextConstraint(new FullTextTerm(null, "fox jumping", false, false, null));
+        assertFilter(filter, queryIndex, indexed, ImmutableList.of("/"));
+
+        //Now this should get passed as the analyzer would ignore 'was'
+        filter.setFullTextConstraint(new FullTextTerm(null, "fox was jumping", false, false, null));
+        assertFilter(filter, queryIndex, indexed, ImmutableList.of("/"));
+    }
+
+    @Test
     public void testTokens() {
         Analyzer analyzer = LuceneIndexConstants.ANALYZER;
         assertEquals(ImmutableList.of("parent", "child"),
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactoryTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactoryTest.java
new file mode 100644
index 0000000..d5aac4c
--- /dev/null
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactoryTest.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.jackrabbit.oak.plugins.index.lucene;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintWriter;
+import java.io.Reader;
+import java.lang.reflect.Field;
+import java.util.Map;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.oak.plugins.index.lucene.NodeStateAnalyzerFactory.NodeStateResourceLoader;
+import org.apache.jackrabbit.oak.plugins.index.lucene.util.TokenizerChain;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
+import org.apache.lucene.analysis.core.LowerCaseTokenizer;
+import org.apache.lucene.analysis.core.StopFilterFactory;
+import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
+import org.apache.lucene.analysis.path.PathHierarchyTokenizerFactory;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.ClasspathResourceLoader;
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.util.Version;
+import org.junit.Test;
+
+import static org.apache.jackrabbit.JcrConstants.JCR_CONTENT;
+import static org.apache.jackrabbit.JcrConstants.JCR_DATA;
+import static org.apache.jackrabbit.oak.api.Type.NAMES;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_CHAR_FILTERS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_CLASS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_FILTERS;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.ANL_TOKENIZER;
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.plugins.tree.TreeConstants.OAK_CHILD_ORDER;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class NodeStateAnalyzerFactoryTest {
+
+    private NodeStateAnalyzerFactory factory = new NodeStateAnalyzerFactory(LuceneIndexConstants.VERSION);
+
+    @Test
+    public void analyzerViaReflection() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.setProperty(ANL_CLASS, TestAnalyzer.class.getName());
+
+        TestAnalyzer analyzer = (TestAnalyzer) factory.createInstance(nb.getNodeState());
+        assertNotNull(analyzer);
+        assertEquals(LuceneIndexConstants.VERSION, analyzer.matchVersion);
+
+        nb.setProperty(LuceneIndexConstants.ANL_LUCENE_MATCH_VERSION, Version.LUCENE_31.toString());
+        analyzer = (TestAnalyzer) factory.createInstance(nb.getNodeState());
+        assertEquals("Version field not picked from config",Version.LUCENE_31, analyzer.matchVersion);
+
+        byte[] stopWords = newCharArraySet("foo", "bar");
+        createFileNode(nb, LuceneIndexConstants.ANL_STOPWORDS, stopWords);
+        analyzer = (TestAnalyzer) factory.createInstance(nb.getNodeState());
+
+        assertTrue("Configured stopword set not used",analyzer.getStopwordSet().contains("foo"));
+    }
+
+    @Test
+    public void analyzerByComposition_Tokenizer() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.child(ANL_TOKENIZER).setProperty(ANL_NAME, "whitespace");
+
+        TokenizerChain analyzer = (TokenizerChain) factory.createInstance(nb.getNodeState());
+        assertEquals(WhitespaceTokenizerFactory.class.getName(), analyzer.getTokenizer().getClassArg());
+
+        nb.child(ANL_TOKENIZER)
+                .setProperty(ANL_NAME, "pathhierarchy")
+                .setProperty("delimiter", "#");
+        analyzer = (TokenizerChain) factory.createInstance(nb.getNodeState());
+        assertEquals(PathHierarchyTokenizerFactory.class.getName(), analyzer.getTokenizer().getClassArg());
+        assertEquals('#', getValue(analyzer.getTokenizer(), "delimiter"));
+    }
+
+    @Test
+    public void analyzerByComposition_TokenFilter() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.child(ANL_TOKENIZER).setProperty(ANL_NAME, "whitespace");
+
+        NodeBuilder filters = nb.child(ANL_FILTERS);
+        filters.setProperty(OAK_CHILD_ORDER, ImmutableList.of("stop", "LowerCase"),NAMES);
+        filters.child("LowerCase").setProperty(ANL_NAME, "LowerCase");
+        //name is optional. Derived from nodeName
+        filters.child("stop").setProperty(ANL_LUCENE_MATCH_VERSION, Version.LUCENE_31.toString());
+
+        TokenizerChain analyzer = (TokenizerChain) factory.createInstance(nb.getNodeState());
+        assertEquals(2, analyzer.getFilters().length);
+
+        //check the order
+        assertEquals(StopFilterFactory.class.getName(), analyzer.getFilters()[0].getClassArg());
+        assertEquals(LowerCaseFilterFactory.class.getName(), analyzer.getFilters()[1].getClassArg());
+
+        assertTrue(analyzer.getFilters()[0].isExplicitLuceneMatchVersion());
+    }
+
+    @Test
+    public void analyzerByComposition_CharFilter() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.child(ANL_TOKENIZER).setProperty(ANL_NAME, "whitespace");
+
+        NodeBuilder filters = nb.child(ANL_CHAR_FILTERS);
+        filters.setProperty(OAK_CHILD_ORDER, ImmutableList.of("htmlStrip", "mapping"),NAMES);
+        filters.child("mapping").setProperty(ANL_NAME, "mapping");
+        filters.child("htmlStrip"); //name is optional. Derived from nodeName
+
+        TokenizerChain analyzer = (TokenizerChain) factory.createInstance(nb.getNodeState());
+        assertEquals(2, analyzer.getCharFilters().length);
+
+        //check the order
+        assertEquals(HTMLStripCharFilterFactory.class.getName(), analyzer.getCharFilters()[0].getClassArg());
+        assertEquals(MappingCharFilterFactory.class.getName(), analyzer.getCharFilters()[1].getClassArg());
+    }
+
+    @Test
+    public void analyzerByComposition_FileResource() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.child(ANL_TOKENIZER).setProperty(ANL_NAME, "whitespace");
+
+        NodeBuilder filters = nb.child(ANL_FILTERS);
+        //name is optional. Derived from nodeName
+        NodeBuilder stop = filters.child("stop");
+        stop.setProperty("words", "set1.txt, set2.txt");
+        createFileNode(stop, "set1.txt", newCharArraySet("foo", "bar"));
+        createFileNode(stop, "set2.txt", newCharArraySet("foo1", "bar1"));
+
+        TokenizerChain analyzer = (TokenizerChain) factory.createInstance(nb.getNodeState());
+        assertEquals(1, analyzer.getFilters().length);
+
+        //check the filter type
+        assertEquals(StopFilterFactory.class.getName(), analyzer.getFilters()[0].getClassArg());
+
+        StopFilterFactory sff = (StopFilterFactory) analyzer.getFilters()[0];
+        assertTrue(sff.getStopWords().contains("foo"));
+        assertTrue(sff.getStopWords().contains("foo1"));
+    }
+
+    @Test
+    public void nodeStateResourceLoader() throws Exception{
+        byte[] testData = "hello".getBytes();
+        NodeBuilder nb = EMPTY_NODE.builder();
+        createFileNode(nb, "foo", testData);
+
+        NodeStateResourceLoader loader = new NodeStateResourceLoader(nb.getNodeState(),
+                new ClasspathResourceLoader());
+        assertArrayEquals(testData, IOUtils.toByteArray(loader.openResource("foo")));
+    }
+
+    @Test
+    public void nodeStateAsMap() throws Exception{
+        NodeBuilder nb = EMPTY_NODE.builder();
+        nb.setProperty("a", "a");
+        nb.setProperty("b", 1);
+
+        Map<String, String> result = factory.convertNodeState(nb.getNodeState());
+        assertEquals("a", result.get("a"));
+        assertEquals("1", result.get("b"));
+    }
+
+    private static NodeBuilder createFileNode(NodeBuilder nb, String nodeName, byte[] content){
+        return nb.child(nodeName).child(JCR_CONTENT).setProperty(JCR_DATA, content);
+    }
+
+    private static byte[] newCharArraySet(String ... words){
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        PrintWriter pw = new PrintWriter(baos);
+        for (String word : words){
+            pw.println(word);
+        }
+        pw.close();
+        return baos.toByteArray();
+    }
+
+    public static class TestAnalyzer extends StopwordAnalyzerBase{
+        final Version matchVersion;
+
+        public TestAnalyzer(Version matchVersion) {
+            super(matchVersion);
+            this.matchVersion = matchVersion;
+        }
+
+        public TestAnalyzer(Version version, CharArraySet stopwords) {
+            super(version, stopwords);
+            this.matchVersion = version;
+        }
+
+        @Override
+        protected TokenStreamComponents createComponents(final String fieldName, final Reader reader) {
+            return new TokenStreamComponents(new LowerCaseTokenizer(matchVersion, reader));
+        }
+    }
+
+    private static Object getValue(Object o, String fieldName) throws NoSuchFieldException, IllegalAccessException {
+        Field f = o.getClass().getDeclaredField(fieldName);
+        f.setAccessible(true);
+        return f.get(o);
+    }
+}
