diff --git oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
index f135aee..a5c0046 100644
--- oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
+++ oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 
 import javax.annotation.CheckForNull;
+import javax.annotation.Nullable;
 import javax.jcr.PropertyType;
 
 import com.google.common.collect.AbstractIterator;
@@ -72,6 +73,10 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
+import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
+import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
+import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;
@@ -808,9 +813,9 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
             @Override
             public boolean visit(FullTextContains contains) {
                 // TODO
-                // visitTerm(contains.getPropertyName(), contains.getRawText(), null, false);
-                // return true;
-                return contains.getBase().accept(this);
+                boolean handled = visitTerm(contains.getPropertyName(), contains.getRawText(), null, false);
+                return handled;
+//                return contains.getBase().accept(this);
             }
 
             @Override
@@ -849,10 +854,7 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
             }
             
             private boolean visitTerm(String propertyName, String text, String boost, boolean not) {
-                String p = propertyName;
-                if (p != null) {
-                    p = getLuceneFieldName(p, pr);
-                }
+                String p = getLuceneFieldName(propertyName, pr);
                 Query q = tokenToQuery(text, p, analyzer, reader);
                 if (q == null) {
                     return false;
@@ -873,7 +875,11 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
         return result.get();
     }
 
-    static String getLuceneFieldName(String p, PlanResult pr) {
+    static String getLuceneFieldName(@Nullable String p, PlanResult pr) {
+        if (p == null){
+            return FieldNames.FULLTEXT;
+        }
+
         if (isNodePath(p)){
             if (pr.isPathTransformed()){
                 p = PathUtils.getName(p);
@@ -888,173 +894,70 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
             }
             p = FieldNames.createAnalyzedFieldName(p);
         }
+
+        if ("*".equals(p)){
+            p = FieldNames.FULLTEXT;
+        }
         return p;
     }
 
-    static Query tokenToQuery(String text, String fieldName, Analyzer analyzer, IndexReader reader) {
+    static Query tokenToQuery(String text, String fieldName, Analyzer analyzer,
+                              IndexReader reader) {
         if (analyzer == null) {
             return null;
         }
-        List<String> tokens = tokenize(text, analyzer);
-
-        if (tokens.isEmpty()) {
-            // TODO what should be returned in the case there are no tokens?
-            return new BooleanQuery();
-        }
-        if (tokens.size() == 1) {
-            String token = tokens.iterator().next();
-            if (hasFulltextToken(token)) {
-                return new WildcardQuery(newFulltextTerm(token, fieldName));
-            } else {
-                return new TermQuery(newFulltextTerm(token, fieldName));
-            }
-        } else {
-            if (hasFulltextToken(tokens)) {
-                MultiPhraseQuery mpq = new MultiPhraseQuery();
-                for(String token: tokens){
-                    if (hasFulltextToken(token)) {
-                        Term[] terms = extractMatchingTokens(reader, fieldName, token);
-                        if (terms != null && terms.length > 0) {
-                            mpq.add(terms);
-                        }
-                    } else {
-                        mpq.add(newFulltextTerm(token, fieldName));
-                    }
-                }
-                return mpq;
-            } else {
-                PhraseQuery pq = new PhraseQuery();
-                for (String t : tokens) {
-                    pq.add(newFulltextTerm(t, fieldName));
-                }
-                return pq;
-            }
-        }
-    }
+        StandardQueryParser parserHelper = new StandardQueryParser(analyzer);
+        parserHelper.setAllowLeadingWildcard(true);
+        parserHelper.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
 
-    private static Term[] extractMatchingTokens(IndexReader reader, String fieldName, String token) {
-        if (reader == null) {
-            // getPlan call
-            return null;
-        }
+        text = rewriteQueryText(text);
 
         try {
-            List<Term> terms = new ArrayList<Term>();
-            Term onTerm = newFulltextTerm(token, fieldName);
-            Terms t = MultiFields.getTerms(reader, onTerm.field());
-            Automaton a = WildcardQuery.toAutomaton(onTerm);
-            CompiledAutomaton ca = new CompiledAutomaton(a);
-            TermsEnum te = ca.getTermsEnum(t);
-            BytesRef text;
-            while ((text = te.next()) != null) {
-                terms.add(newFulltextTerm(text.utf8ToString(), fieldName));
-            }
-            return terms.toArray(new Term[terms.size()]);
-        } catch (IOException e) {
-            LOG.error("Building fulltext query failed", e.getMessage());
-            return null;
-        }
-    }
-
-    private static boolean hasFulltextToken(List<String> tokens) {
-        for (String token : tokens) {
-            if (hasFulltextToken(token)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    private static boolean hasFulltextToken(String token) {
-        for (char c : fulltextTokens) {
-            if (token.indexOf(c) != -1) {
-                return true;
-            }
+            return parserHelper.parse(text, fieldName);
+        } catch (QueryNodeException e) {
+            throw new RuntimeException("Failed to parse fulltext query: " + text, e);
         }
-        return false;
     }
 
-    private static char[] fulltextTokens = new char[] { '*', '?' };
-
-    /**
-     * Tries to merge back tokens that are split on relevant fulltext query
-     * wildcards ('*' or '?')
-     *
-     *
-     * @param text
-     * @param analyzer
-     * @return
-     */
-    static List<String> tokenize(String text, Analyzer analyzer) {
-        List<String> tokens = new ArrayList<String>();
-        TokenStream stream = null;
-        try {
-            stream = analyzer.tokenStream(FieldNames.FULLTEXT,
-                    new StringReader(text));
-            CharTermAttribute termAtt = stream
-                    .addAttribute(CharTermAttribute.class);
-            OffsetAttribute offsetAtt = stream
-                    .addAttribute(OffsetAttribute.class);
-            // TypeAttribute type = stream.addAttribute(TypeAttribute.class);
-
-            stream.reset();
-
-            int poz = 0;
-            boolean hasFulltextToken = false;
-            StringBuilder token = new StringBuilder();
-            while (stream.incrementToken()) {
-                String term = termAtt.toString();
-                int start = offsetAtt.startOffset();
-                int end = offsetAtt.endOffset();
-                if (start > poz) {
-                    for (int i = poz; i < start; i++) {
-                        for (char c : fulltextTokens) {
-                            if (c == text.charAt(i)) {
-                                token.append(c);
-                                hasFulltextToken = true;
-                            }
-                        }
-                    }
-                }
-                poz = end;
-                if (hasFulltextToken) {
-                    token.append(term);
-                    hasFulltextToken = false;
+    private static String rewriteQueryText(String textsearch){
+        // replace escaped ' with just '
+        StringBuilder rewritten = new StringBuilder();
+        // the default lucene query parser recognizes 'AND' and 'NOT' as
+        // keywords.
+        textsearch = textsearch.replaceAll("\\bAND\\b", "and");
+        textsearch = textsearch.replaceAll("\\bNOT\\b", "not");
+        boolean escaped = false;
+        for (int i = 0; i < textsearch.length(); i++) {
+            if (textsearch.charAt(i) == '\\') {
+                if (escaped) {
+                    rewritten.append("\\\\");
+                    escaped = false;
                 } else {
-                    if (token.length() > 0) {
-                        tokens.add(token.toString());
-                    }
-                    token = new StringBuilder();
-                    token.append(term);
+                    escaped = true;
                 }
-            }
-            // consume to the end of the string
-            if (poz < text.length()) {
-                for (int i = poz; i < text.length(); i++) {
-                    for (char c : fulltextTokens) {
-                        if (c == text.charAt(i)) {
-                            token.append(c);
-                        }
-                    }
+            } else if (textsearch.charAt(i) == '\'') {
+                if (escaped) {
+                    escaped = false;
                 }
-            }
-            if (token.length() > 0) {
-                tokens.add(token.toString());
-            }
-            stream.end();
-        } catch (IOException e) {
-            LOG.error("Building fulltext query failed", e.getMessage());
-            return null;
-        } finally {
-            try {
-                if (stream != null) {
-                    stream.close();
+                rewritten.append(textsearch.charAt(i));
+            }/* else if (textsearch.charAt(i) == '~') {
+                if (i == 0 || Character.isWhitespace(textsearch.charAt(i - 1))) {
+                    // escape tilde so we can use it for similarity query
+                    rewritten.append("\\");
+                }
+                rewritten.append('~');
+            }*/ else if (textsearch.charAt(i) == ':') {
+                // fields as known in lucene are not supported
+                rewritten.append("\\:");
+            } else {
+                if (escaped) {
+                    rewritten.append('\\');
+                    escaped = false;
                 }
-            } catch (IOException e) {
-                // ignore
+                rewritten.append(textsearch.charAt(i));
             }
         }
-        return tokens;
+        return rewritten.toString();
     }
 
     private static String getPathRestriction(IndexPlan plan) {
diff --git oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexQueryTest.java oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexQueryTest.java
index 02c9a4e..8993838 100644
--- oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexQueryTest.java
+++ oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexQueryTest.java
@@ -185,9 +185,15 @@ public class LuceneIndexQueryTest extends AbstractQueryTest {
     public void multiPhraseQuery() throws Exception {
         Tree test = root.getTree("/").addChild("test");
         test.addChild("a").setProperty("dc:format", "type:application/pdf");
+        test.addChild("b").setProperty("dc:format", "progress");
         root.commit();
 
         assertQuery(
+                "/jcr:root//*[jcr:contains(@dc:format, 'pro*')]",
+                "xpath", ImmutableList.of("/test/b"));
+
+
+        assertQuery(
                 "/jcr:root//*[jcr:contains(@dc:format, 'type:appli*')]",
                 "xpath", ImmutableList.of("/test/a"));
 
