Index: src/test/java/org/apache/jackrabbit/core/TestHelper.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/TestHelper.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/TestHelper.java	(working copy)
@@ -105,13 +105,17 @@
     /**
      * wait for async text-extraction tasks to finish
      */
-    public static void waitForTextExtractionTasksToFinish(Session session) throws Exception {
+    public static void waitForTextExtractionTasksToFinish(Session session) {
         final RepositoryContext context = JackrabbitRepositoryStub
                 .getRepositoryContext(session.getRepository());
         JackrabbitThreadPool jtp = ((JackrabbitThreadPool) context
                 .getExecutor());
         while (jtp.getPendingLowPriorityTaskCount() != 0) {
-            TimeUnit.MILLISECONDS.sleep(100);
+            try {
+                TimeUnit.MILLISECONDS.sleep(100);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt(); return; // preserve interrupt status and stop waiting
+            }
         }
     }
 }
Index: src/test/java/org/apache/jackrabbit/core/query/AbstractQueryTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/AbstractQueryTest.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/query/AbstractQueryTest.java	(working copy)
@@ -214,7 +214,7 @@
         // check if all expected are in result
         for (Iterator<String> it = expectedPaths.iterator(); it.hasNext();) {
             String path = it.next();
-            assertTrue(path + " is not part of the result set "+ expectedPaths, resultPaths.contains(path));
+            assertTrue(path + " is not part of the result set "+ resultPaths, resultPaths.contains(path));
         }
         // check result does not contain more than expected
         for (Iterator<String> it = resultPaths.iterator(); it.hasNext();) {
@@ -251,6 +251,7 @@
      */
     protected QueryResult executeQuery(String statement)
             throws RepositoryException {
+        getSearchIndex().flush();
         if (statement.trim().toLowerCase().startsWith("select")) {
             return qm.createQuery(statement, Query.SQL).execute();
         } else {
Index: src/test/java/org/apache/jackrabbit/core/query/XPathAxisTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/XPathAxisTest.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/query/XPathAxisTest.java	(working copy)
@@ -43,6 +43,13 @@
     }
 
     protected void tearDown() throws Exception {
+        n11.remove();
+        n12.remove();
+        n1.remove();
+        n21.remove();
+        n22.remove();
+        n2.remove();
+        testRootNode.save();
         n1 = null;
         n2 = null;
         n11 = null;
Index: src/test/java/org/apache/jackrabbit/core/query/lucene/IndexMigrationTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/lucene/IndexMigrationTest.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/query/lucene/IndexMigrationTest.java	(working copy)
@@ -16,7 +16,13 @@
  */
 package org.apache.jackrabbit.core.query.lucene;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
 import junit.framework.TestCase;
+
+import org.apache.jackrabbit.core.id.NodeId;
 import org.apache.jackrabbit.core.query.lucene.directory.DirectoryManager;
 import org.apache.jackrabbit.core.query.lucene.directory.RAMDirectoryManager;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -26,10 +32,6 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Version;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-
 /**
  * <code>IndexMigrationTest</code> contains a test case for JCR-2393.
  */
@@ -50,7 +52,7 @@
         DirectoryManager dirMgr = new RAMDirectoryManager();
 
         PersistentIndex idx = new PersistentIndex("index",
-                new StandardAnalyzer(Version.LUCENE_24), Similarity.getDefault(),
+                new StandardAnalyzer(Version.LUCENE_36), Similarity.getDefault(),
                 new DocNumberCache(100),
                 new IndexingQueue(new IndexingQueueStore(new RAMDirectory())),
                 dirMgr, 0);
@@ -66,8 +68,8 @@
 
     protected static Document createDocument(String name, String value) {
         Document doc = new Document();
-        doc.add(new Field(FieldNames.UUID, false, UUID.randomUUID().toString(),
-                Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
+        // id first
+        doc.add(new IDField(NodeId.valueOf(UUID.randomUUID().toString())));
         doc.add(new Field(FieldNames.PROPERTIES, false, createNamedValue14(
                 name, value), Field.Store.NO, Field.Index.NOT_ANALYZED,
                 Field.TermVector.NO));
Index: src/test/java/org/apache/jackrabbit/core/query/lucene/ChainedTermEnumTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/lucene/ChainedTermEnumTest.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/query/lucene/ChainedTermEnumTest.java	(working copy)
@@ -22,6 +22,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermEnum;
 import org.apache.lucene.store.Directory;
@@ -61,21 +62,32 @@
     protected TermEnum createTermEnum(String prefix, int numTerms)
             throws IOException {
         Directory dir = new RAMDirectory();
-        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_24),
-                true, IndexWriter.MaxFieldLength.UNLIMITED);
-        for (int i = 0; i < numTerms; i++) {
-            Document doc = new Document();
-            doc.add(new Field("field", true, prefix + i, Field.Store.NO,
-                    Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
-            writer.addDocument(doc);
+        IndexWriter writer = null;
+        try {
+            writer = new IndexWriter(dir, new IndexWriterConfig(
+                    Version.LUCENE_36, new StandardAnalyzer(Version.LUCENE_36)));
+            for (int i = 0; i < numTerms; i++) {
+                Document doc = new Document();
+                doc.add(new Field("field", true, prefix + i, Field.Store.NO,
+                        Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
+                writer.addDocument(doc);
+            }
+        } finally {
+            if (writer != null)
+                writer.close();
         }
-        writer.close();
-        IndexReader reader = IndexReader.open(dir, false);
-        TermEnum terms = reader.terms();
-        if (terms.term() == null) {
-            // position at first term
-            terms.next();
+        boolean success = false;
+        IndexReader reader = IndexReader.open(dir);
+        try {
+            TermEnum terms = reader.terms();
+            if (terms.term() == null) {
+                // position at first term
+                terms.next();
+            }
+            success = true;
+            return terms;
+        } finally {
+            if (!success) reader.close(); // keep reader open on success: the returned TermEnum needs it
         }
-        return terms;
     }
 }
Index: src/test/java/org/apache/jackrabbit/core/query/FulltextQueryTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/FulltextQueryTest.java	(revision 1343703)
+++ src/test/java/org/apache/jackrabbit/core/query/FulltextQueryTest.java	(working copy)
@@ -19,7 +19,6 @@
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.Callable;
 
 import javax.jcr.Node;
 import javax.jcr.RepositoryException;
@@ -369,12 +368,13 @@
 
     private void assertContainsQuery(String statement, boolean match)
             throws InvalidQueryException, RepositoryException {
+        getSearchIndex().flush();
+
         StringBuffer stmt = new StringBuffer();
         stmt.append("/jcr:root").append(testRoot).append("/*");
         stmt.append("[jcr:contains(., '").append(statement);
         stmt.append("')]");
-
-        Query q = superuser.getWorkspace().getQueryManager().createQuery(stmt.toString(), Query.XPATH);
+        Query q = qm.createQuery(stmt.toString(), Query.XPATH);
         checkResult(q.execute(), match ? 1 : 0);
 
         stmt = new StringBuffer();
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/CachingMultiIndexReader.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/CachingMultiIndexReader.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/CachingMultiIndexReader.java	(working copy)
@@ -101,7 +101,7 @@
      * @throws IOException if an error occurs while reading from the index.
      */
     public DocId getParentDocId(int n) throws IOException {
-        int i = readerIndex(n);
+        int i = readerIndexJr(n);
         DocId id = subReaders[i].getParent(n - starts[i]);
         return id.applyOffset(starts[i]);
     }
@@ -220,7 +220,7 @@
      * @param n document number.
      * @return the reader index.
      */
-    private int readerIndex(int n) {
+    private int readerIndexJr(int n) {
         int lo = 0;                                      // search starts array
         int hi = subReaders.length - 1;                  // for first element less
 
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MultiScorer.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MultiScorer.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MultiScorer.java	(working copy)
@@ -16,11 +16,11 @@
  */
 package org.apache.jackrabbit.core.query.lucene;
 
+import java.io.IOException;
+
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
 
-import java.io.IOException;
-
 /**
  * <code>MultiScorer</code> spans multiple Scorers and returns document numbers
  * and score values in the order as supplied to the constructor of this
@@ -65,7 +65,7 @@
     @Override
     public int nextDoc() throws IOException {
         while (currentDoc != NO_MORE_DOCS) {
-            if (scorers[currentScorer].nextDoc() != NO_MORE_DOCS) {
+            if (scorers[currentScorer] != null && scorers[currentScorer].nextDoc() != NO_MORE_DOCS) {
                 currentDoc = scorers[currentScorer].docID() + starts[currentScorer];
                 return currentDoc;
             } else if (++currentScorer < scorers.length) {
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/CommittableIndexReader.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/CommittableIndexReader.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/CommittableIndexReader.java	(working copy)
@@ -137,4 +137,14 @@
     BitSet getDeletedDocs() {
         return (BitSet) deletedDocs.clone();
     }
+
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder("CommittableIndexReader(");
+      buffer.append(in);
+      buffer.append(',');
+      buffer.append(modCount);
+      buffer.append(')');
+      return buffer.toString();
+    }
 }
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitQueryParser.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitQueryParser.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitQueryParser.java	(working copy)
@@ -18,17 +18,18 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.ClassicTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Version;
 
 /**
@@ -56,11 +57,13 @@
                                  Analyzer analyzer,
                                  SynonymProvider synonymProvider,
                                  PerQueryCache cache) {
-        super(Version.LUCENE_24, fieldName, analyzer);
+        super(Version.LUCENE_36, fieldName, analyzer);
         this.synonymProvider = synonymProvider;
         this.cache = cache;
         setAllowLeadingWildcard(true);
         setDefaultOperator(Operator.AND);
+        // TODO check me
+        setAutoGeneratePhraseQueries(true);
     }
 
     /**
@@ -135,17 +138,24 @@
         }
     }
 
-
     /**
      * {@inheritDoc}
      */
     protected Query getFieldQuery(String field, String queryText)
             throws ParseException {
+        return getFieldQuery(field, queryText, true);
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    protected Query getFieldQuery(String field, String queryText, boolean quoted)
+            throws ParseException {
         if (queryText.startsWith("~")) {
             // synonym query
             return getSynonymQuery(field, queryText.substring(1));
         } else {
-            return super.getFieldQuery(field, queryText);
+            return super.getFieldQuery(field, queryText, quoted);
         }
     }
 
@@ -158,13 +168,13 @@
         Analyzer a = getAnalyzer();
         TokenStream ts = a.tokenStream(field, new StringReader(termStr));
         int count = 0;
-        boolean isCJ = false;
+//        boolean isCJ = false;
         try {
-            TypeAttribute t = ts.addAttribute(TypeAttribute.class);
+//            TypeAttribute t = ts.addAttribute(TypeAttribute.class);
             ts.reset();
             while (ts.incrementToken()) {
                 count++;
-                isCJ = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.CJ].equals(t.type());
+//                isCJ = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.CJ].equals(t.type());
             }
             ts.end();
         } catch (IOException e) {
@@ -176,7 +186,7 @@
                 // ignore
             }
         }
-        if (count > 1 && isCJ) {
+        if (count > 1 /* && isCJ */) {
             return getFieldQuery(field, termStr);
         } else {
             return getWildcardQuery(field, termStr + "*");
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/directory/FSDirectoryManager.java	(working copy)
@@ -152,7 +152,7 @@
 
         @Override
         public String[] listAll() throws IOException {
-            File[] files = directory.getFile().listFiles(FILTER);
+            File[] files = directory.getDirectory().listFiles(FILTER);
             if (files == null) {
                 return null;
             }
@@ -173,7 +173,12 @@
             return directory.fileModified(name);
         }
 
+        /**
+         * @deprecated Lucene will remove this method
+         * @see org.apache.lucene.store.Directory#touchFile(java.lang.String)
+         */
         @Override
+        @Deprecated
         public void touchFile(String name) throws IOException {
             directory.touchFile(name);
         }
@@ -196,7 +201,7 @@
         @Override
         public IndexInput openInput(String name) throws IOException {
             IndexInput in = directory.openInput(name);
-            return new IndexInputLogWrapper(in);
+            return new IndexInputLogWrapper(name, in);
         }
 
         @Override
@@ -208,7 +213,7 @@
         public IndexInput openInput(String name, int bufferSize)
                 throws IOException {
             IndexInput in = directory.openInput(name, bufferSize);
-            return new IndexInputLogWrapper(in);
+            return new IndexInputLogWrapper(name, in);
         }
 
         @Override
@@ -222,7 +227,7 @@
         }
 
         @Override
-        public void setLockFactory(LockFactory lockFactory) {
+        public void setLockFactory(LockFactory lockFactory) throws IOException {
             directory.setLockFactory(lockFactory);
         }
 
@@ -249,7 +254,8 @@
 
         private IndexInput in;
 
-        IndexInputLogWrapper(IndexInput in) {
+        IndexInputLogWrapper(String name, IndexInput in) {
+            super(name);
             this.in = in;
         }
 
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/AbstractIndex.java	(working copy)
@@ -35,10 +35,14 @@
 import org.apache.lucene.index.IndexDeletionPolicy;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LogByteSizeMergePolicy;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TieredMergePolicy;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Version;
 import org.apache.tika.io.IOExceptionWithCause;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,9 +89,9 @@
 
     /** Compound file flag */
     private boolean useCompoundFile = true;
-
-    /** maxFieldLength config parameter */
-    private int maxFieldLength = SearchIndex.DEFAULT_MAX_FIELD_LENGTH;
+//
+//    /** maxFieldLength config parameter */
+//    private int maxFieldLength = SearchIndex.DEFAULT_MAX_FIELD_LENGTH;
 
     /** termInfosIndexDivisor config parameter */
     private int termInfosIndexDivisor = SearchIndex.DEFAULT_TERM_INFOS_INDEX_DIVISOR;
@@ -142,8 +146,7 @@
         this.isExisting = IndexReader.indexExists(directory);
 
         if (!isExisting) {
-            indexWriter = new IndexWriter(directory, analyzer,
-                    IndexWriter.MaxFieldLength.LIMITED);
+            indexWriter = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_36, analyzer));
             // immediately close, now that index has been created
             indexWriter.close();
             indexWriter = null;
@@ -291,7 +294,7 @@
                 return readOnlyReader;
             } else {
                 // reader outdated
-                if (readOnlyReader.getRefCount() == 1) {
+                if (readOnlyReader.getRefCountJr() == 1) {
                     // not in use, except by this index
                     // update the reader
                     readOnlyReader.updateDeletedDocs(modifiableReader);
@@ -308,7 +311,7 @@
         // if we get here there is no up-to-date read-only reader
         if (sharedReader == null) {
             // create new shared reader
-            IndexReader reader = IndexReader.open(getDirectory(), null, true, termInfosIndexDivisor);
+            IndexReader reader = IndexReader.open(getDirectory(), termInfosIndexDivisor);
             CachingIndexReader cr = new CachingIndexReader(
                     reader, cache, initCache);
             sharedReader = new SharedIndexReader(cr);
@@ -345,9 +348,11 @@
             indexReader = null;
         }
         if (indexWriter == null) {
-            indexWriter = new IndexWriter(getDirectory(), analyzer,
-                    new IndexWriter.MaxFieldLength(maxFieldLength));
-            indexWriter.setSimilarity(similarity);
+            IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer);
+            config.setSimilarity(similarity);
+            config.setMergePolicy(new LogByteSizeMergePolicy());
+
+            indexWriter = new IndexWriter(getDirectory(), config);
             indexWriter.setUseCompoundFile(useCompoundFile);
             indexWriter.setInfoStream(STREAM_LOGGER);
         }
@@ -381,6 +386,8 @@
         // optimize if requested
         if (optimize) {
             IndexWriter writer = getIndexWriter();
+            // writer.forceMerge(1, true);
+            // TODO optimize not needed
             writer.optimize();
             writer.close();
             indexWriter = null;
@@ -529,7 +536,10 @@
 
     /**
      * The lucene index writer property: useCompountFile
+     * 
+     * @deprecated lucene now uses {@link TieredMergePolicy} set via the {@link IndexWriterConfig}
      */
+    @Deprecated
     void setUseCompoundFile(boolean b) {
         useCompoundFile = b;
         if (indexWriter != null) {
@@ -539,12 +549,14 @@
 
     /**
      * The lucene index writer property: maxFieldLength
+     *
+     * @deprecated maxFieldLength is no longer supported by {@link IndexWriterConfig}; this method is now a no-op
      */
     void setMaxFieldLength(int maxFieldLength) {
-        this.maxFieldLength = maxFieldLength;
-        if (indexWriter != null) {
-            indexWriter.setMaxFieldLength(maxFieldLength);
-        }
+//        this.maxFieldLength = maxFieldLength;
+//        if (indexWriter != null) {
+//            indexWriter.setMaxFieldLength(maxFieldLength);
+//        }
     }
 
     /**
@@ -571,7 +583,7 @@
      * @param f a lucene field.
      * @return the index parameter on <code>f</code>.
      */
-    private Field.Index getIndexParameter(Fieldable f) {
+    private static Field.Index getIndexParameter(Fieldable f) {
         if (!f.isIndexed()) {
             return Field.Index.NO;
         } else if (f.isTokenized()) {
@@ -587,7 +599,7 @@
      * @param f a lucene field.
      * @return the term vector parameter on <code>f</code>.
      */
-    private Field.TermVector getTermVectorParameter(Fieldable f) {
+    private static Field.TermVector getTermVectorParameter(Fieldable f) {
         if (f.isStorePositionWithTermVector() && f.isStoreOffsetWithTermVector()) {
             return Field.TermVector.WITH_POSITIONS_OFFSETS;
         } else if (f.isStorePositionWithTermVector()) {
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/NotQuery.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/NotQuery.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/NotQuery.java	(working copy)
@@ -188,6 +188,9 @@
 
             if (docNo == -1) {
                 // get first doc of context scorer
+                if (contextScorer == null) {
+                    return NO_MORE_DOCS;
+                }
                 int docId = contextScorer.nextDoc();
                 if (docId != NO_MORE_DOCS) {
                     contextNo = docId;
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitAnalyzer.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitAnalyzer.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/JackrabbitAnalyzer.java	(working copy)
@@ -23,7 +23,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.ClassicAnalyzer;
 import org.apache.lucene.util.Version;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,8 +42,8 @@
     private static Logger log =
             LoggerFactory.getLogger(JackrabbitAnalyzer.class);
 
-    private static final Analyzer DEFAULT_ANALYZER =
-            new StandardAnalyzer(Version.LUCENE_24, Collections.emptySet());
+    private static final Analyzer DEFAULT_ANALYZER = new ClassicAnalyzer(
+            Version.LUCENE_36, Collections.emptySet());
 
     /**
      * Returns a new instance of the named Lucene {@link Analyzer} class,
@@ -74,7 +74,7 @@
             Class<?>[] types = constructor.getParameterTypes();
             if (types.length == 1 && types[0] == Version.class) {
                 try {
-                    return (Analyzer) constructor.newInstance(Version.LUCENE_24);
+                    return (Analyzer) constructor.newInstance(Version.LUCENE_36);
                 } catch (Exception e) {
                     cause = e;
                 }
@@ -132,7 +132,7 @@
      * Reader. If the fieldName (property) is configured to have a different
      * analyzer than the default, this analyzer is used for tokenization
      */
-    public TokenStream tokenStream(String fieldName, Reader reader) {
+    public final TokenStream tokenStream(String fieldName, Reader reader) {
         if (indexingConfig != null) {
             Analyzer propertyAnalyzer = indexingConfig.getPropertyAnalyzer(fieldName);
             if (propertyAnalyzer != null) {
@@ -143,7 +143,7 @@
     }
 
     @Override
-    public TokenStream reusableTokenStream(String fieldName, Reader reader)
+    public final TokenStream reusableTokenStream(String fieldName, Reader reader)
             throws IOException {
         if (indexingConfig != null) {
             Analyzer propertyAnalyzer = indexingConfig.getPropertyAnalyzer(fieldName);
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/ReadOnlyIndexReader.java	(working copy)
@@ -176,7 +176,12 @@
      * @exception UnsupportedOperationException always
      */
     @Override
-    protected void doCommit(Map<String,String> commitUserData) throws IOException { 
+    protected void doCommit(Map<String, String> commitUserData) throws IOException {
+        if (!hasChanges) {
+            // change in behavior: IndexReader does not check for hasChanges
+            // before calling doCommit();
+            return;
+        }
         throw new UnsupportedOperationException("IndexReader is read-only");
     }
 
@@ -222,6 +227,16 @@
         return new FilteredTermPositions(super.termPositions());
     }
 
+    @Override
+    public String toString() {
+      final StringBuilder buffer = new StringBuilder("ReadOnlyIndexReader(");
+      buffer.append(in);
+      buffer.append(',');
+      buffer.append(deletedDocsVersion);
+      buffer.append(')');
+      return buffer.toString();
+    }
+
     //----------------------< FilteredTermDocs >--------------------------------
 
     /**
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IDField.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IDField.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IDField.java	(working copy)
@@ -21,6 +21,8 @@
 import org.apache.jackrabbit.core.id.NodeId;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.AbstractField;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 
 /**
  * <code>IDField</code> implements a lucene field for the id of a node.
@@ -37,7 +39,8 @@
         this.isStored = true;
         this.isTokenized = false;
         this.omitNorms = true;
-        this.omitTermFreqAndPositions = true;
+        setIndexOptions(IndexOptions.DOCS_ONLY);
+        setStoreTermVector(TermVector.NO);
     }
 
     public String stringValue() {
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexFormatVersion.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexFormatVersion.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexFormatVersion.java	(working copy)
@@ -19,6 +19,7 @@
 import java.util.Collection;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.ReaderUtil;
 
 /**
  * This class indicates the lucene index format that is used.
@@ -102,8 +103,7 @@
      * index reader.
      */
     public static IndexFormatVersion getVersion(IndexReader indexReader) {
-        Collection<String> fields = indexReader.getFieldNames(
-                IndexReader.FieldOption.ALL);
+        Collection<String> fields = ReaderUtil.getIndexedFields(indexReader);
         if (fields.contains(FieldNames.LOCAL_NAME) || indexReader.numDocs() == 0) {
             return IndexFormatVersion.V3;
         } else if (fields.contains(FieldNames.PROPERTIES_SET)) {
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/RefCountingIndexReader.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/RefCountingIndexReader.java	(revision 1350559)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/RefCountingIndexReader.java	(working copy)
@@ -48,7 +48,7 @@
     /**
      * @return the current reference count value.
      */
-    public synchronized int getRefCount() {
+    public synchronized int getRefCountJr() {
         return refCount;
     }
 
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(working copy)
@@ -1765,7 +1765,7 @@
          * @inheritDoc
          */
         public int[] getParents(int n, int[] docNumbers) throws IOException {
-            int i = readerIndex(n);
+            int i = readerIndexJr(n);
             DocId id = subReaders[i].getParentDocId(n - starts[i]);
             id = id.applyOffset(starts[i]);
             return id.getDocumentNumbers(this, docNumbers);
@@ -1800,7 +1800,7 @@
          * @param n document number.
          * @return the reader index.
          */
-        private int readerIndex(int n) {
+        private int readerIndexJr(int n) {
             int lo = 0;                                      // search starts array
             int hi = subReaders.length - 1;                  // for first element less
 
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/MoreLikeThis.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/MoreLikeThis.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/MoreLikeThis.java	(working copy)
@@ -17,6 +17,7 @@
 package org.apache.jackrabbit.core.query.lucene;
 
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.Version;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -154,7 +155,7 @@
      * Default analyzer to parse source doc with.
      * @see #getAnalyzer
      */
-    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Version.LUCENE_24);
+    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Version.LUCENE_36);
 
     /**
      * Ignore terms with less than this frequency in the source doc.
@@ -506,7 +507,7 @@
     public Query like(int docNum) throws IOException {
         if (fieldNames == null) {
             // gather list of valid fields from lucene
-            Collection<String> fields = ir.getFieldNames(IndexReader.FieldOption.INDEXED);
+            Collection<String> fields = ReaderUtil.getIndexedFields(ir);
             fieldNames = fields.toArray(new String[fields.size()]);
         }
 
@@ -521,7 +522,7 @@
     public Query like(File f) throws IOException {
         if (fieldNames == null) {
             // gather list of valid fields from lucene
-            Collection<String> fields = ir.getFieldNames(IndexReader.FieldOption.INDEXED);
+            Collection<String> fields = ReaderUtil.getIndexedFields(ir);
             fieldNames = fields.toArray(new String[fields.size()]);
         }
 
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SortedLuceneQueryHits.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SortedLuceneQueryHits.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SortedLuceneQueryHits.java	(working copy)
@@ -152,7 +152,7 @@
     //-------------------------------< internal >-------------------------------
 
     private void getHits() throws IOException {
-        TopFieldCollector collector = TopFieldCollector.create(sort, numHits, false, true, false, false);
+        TopFieldCollector collector = TopFieldCollector.create(sort, numHits, false, true, false, true);
         searcher.search(query, collector);
         size = collector.getTotalHits();
         offset += scoreDocs.length;
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/LuceneQueryHits.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/LuceneQueryHits.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/LuceneQueryHits.java	(working copy)
@@ -49,7 +49,7 @@
         this.reader = reader;
         // We rely on Scorer#nextDoc() and Scorer#advance(int) so enable
         // scoreDocsInOrder
-        this.scorer = query.weight(searcher).scorer(reader, true, false);
+        this.scorer = query.createWeight(searcher).scorer(reader, true, false);
     }
 
     /**
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/IndexMigration.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/IndexMigration.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/IndexMigration.java	(working copy)
@@ -33,13 +33,20 @@
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FilterIndexReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.LogByteSizeMergePolicy;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermEnum;
 import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.index.UpgradeIndexMergePolicy;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.Version;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -102,21 +109,23 @@
         // if we get here then the index must be migrated
         log.debug("Index requires migration {}", indexDir);
 
-        String migrationName = index.getName() + "_v2.3";
+        String migrationName = index.getName() + "_v36";
         if (directoryManager.hasDirectory(migrationName)) {
             directoryManager.delete(migrationName);
         }
 
         Directory migrationDir = directoryManager.getDirectory(migrationName);
+        final IndexWriterConfig c = new IndexWriterConfig(Version.LUCENE_36, new JackrabbitAnalyzer());
+        c.setMergePolicy(new UpgradeIndexMergePolicy(new LogByteSizeMergePolicy()));
+            c.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
         try {
-            IndexWriter writer = new IndexWriter(migrationDir, new JackrabbitAnalyzer(),
-                    IndexWriter.MaxFieldLength.UNLIMITED);
+            IndexWriter writer = new IndexWriter(migrationDir, c);
             try {
-                IndexReader r = new MigrationIndexReader(
-                        IndexReader.open(index.getDirectory(), true),
+                IndexReader r = new MigrationIndexReader(IndexReader.open(index.getDirectory()),
                         oldSeparatorChar);
                 try {
-                    writer.addIndexes(new IndexReader[]{r});
+                    writer.addIndexes(r);
+                    writer.forceMerge(1);
                     writer.close();
                 } finally {
                     r.close();
@@ -129,8 +138,7 @@
         }
         directoryManager.delete(index.getName());
         if (!directoryManager.rename(migrationName, index.getName())) {
-            throw new IOException("failed to move migrated directory " +
-                    migrationDir);
+            throw new IOException("failed to move migrated directory " + migrationDir);
         }
         log.info("Migrated " + index.getName());
     }
@@ -150,6 +158,17 @@
             this.oldSepChar = oldSepChar;
         }
 
+        @Override
+        public IndexReader[] getSequentialSubReaders() {
+            return null;
+        }
+
+        @Override
+        public FieldInfos getFieldInfos() {
+            return ReaderUtil.getMergedFieldInfos(in);
+        }
+
+        @Override
         public Document document(int n, FieldSelector fieldSelector)
                 throws CorruptIndexException, IOException {
             Document doc = super.document(n, fieldSelector);
@@ -167,12 +186,10 @@
             return doc;
         }
 
+        @Override
         public TermEnum terms() throws IOException {
             List<TermEnum> enums = new ArrayList<TermEnum>();
-            List<String> fieldNames = new ArrayList<String>();
-            for (Object obj : in.getFieldNames(FieldOption.ALL)) {
-                fieldNames.add((String) obj);
-            }
+            List<String> fieldNames = new ArrayList<String>(ReaderUtil.getIndexedFields(in));
             Collections.sort(fieldNames);
             for (String fieldName : fieldNames) {
                 if (fieldName.equals(FieldNames.PROPERTIES)) {
@@ -184,6 +201,7 @@
             return new MigrationTermEnum(new ChainedTermEnum(enums), oldSepChar);
         }
 
+        @Override
         public TermPositions termPositions() throws IOException {
             return new MigrationTermPositions(in.termPositions(), oldSepChar);
         }
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java	(revision 1350556)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/DescendantSelfAxisQuery.java	(working copy)
@@ -392,8 +392,8 @@
          */
         public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
                 boolean topScorer) throws IOException {
-            contextScorer = contextQuery.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
-            subScorer = subQuery.weight(searcher).scorer(reader, scoreDocsInOrder, topScorer);
+            contextScorer = searcher.createNormalizedWeight(contextQuery).scorer(reader, scoreDocsInOrder, topScorer);
+            subScorer = searcher.createNormalizedWeight(subQuery).scorer(reader, scoreDocsInOrder, topScorer);
             HierarchyResolver resolver = (HierarchyResolver) reader;
             return new DescendantSelfAxisScorer(searcher.getSimilarity(), reader, resolver);
         }
@@ -471,9 +471,14 @@
             }
 
             collectContextHits();
-            currentDoc = subScorer.nextDoc();
             if (contextHits.isEmpty()) {
                 currentDoc = NO_MORE_DOCS;
+            } else {
+                if (subScorer != null) {
+                    currentDoc = subScorer.nextDoc();
+                } else {
+                    currentDoc = NO_MORE_DOCS;
+                }
             }
             while (currentDoc != NO_MORE_DOCS) {
                 if (isValid(currentDoc)) {
@@ -505,7 +510,9 @@
             // optimize in the case of an advance to finish.
             // see https://issues.apache.org/jira/browse/JCR-3082
             if (target == NO_MORE_DOCS) {
-                subScorer.advance(target);
+                if (subScorer != null) {
+                    subScorer.advance(target);
+                }
                 currentDoc = NO_MORE_DOCS;
                 return currentDoc;
             }
