Index: lucene/src/java/org/apache/lucene/search/BooleanQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/BooleanQuery.java	(revision 1022443)
+++ lucene/src/java/org/apache/lucene/search/BooleanQuery.java	(working copy)
@@ -368,6 +368,24 @@
     return new BooleanWeight(searcher);
   }
 
+  private static final Comparator<BooleanClause> tqComp = new Comparator<BooleanClause>() {
+    public  int compare(BooleanClause one, BooleanClause two) {
+      Query oneQ = one.getQuery();
+      Query twoQ = two.getQuery();
+      boolean oneTQ = oneQ instanceof TermQuery;
+      boolean twoTQ = twoQ instanceof TermQuery;
+      if (oneTQ && twoTQ) {
+        return ((TermQuery) oneQ).getTerm().compareTo(((TermQuery) twoQ).getTerm());
+      } else if (oneTQ) {
+        return -1;
+      } else if (twoTQ) {
+        return 1;
+      } else {
+        return 0;
+      }
+    }
+  };
+
   @Override
   public Query rewrite(IndexReader reader) throws IOException {
     if (minNrShouldMatch == 0 && clauses.size() == 1) {                    // optimize 1-clause queries
@@ -386,20 +404,18 @@
       }
     }
 
-    BooleanQuery clone = null;                    // recursively rewrite
+    BooleanQuery clone = (BooleanQuery) this.clone();
+
     for (int i = 0 ; i < clauses.size(); i++) {
       BooleanClause c = clauses.get(i);
       Query query = c.getQuery().rewrite(reader);
       if (query != c.getQuery()) {                     // clause rewrote: must clone
-        if (clone == null)
-          clone = (BooleanQuery)this.clone();
         clone.clauses.set(i, new BooleanClause(query, c.getOccur()));
       }
     }
-    if (clone != null) {
-      return clone;                               // some clauses rewrote
-    } else
-      return this;                                // no clauses rewrote
+
+    Collections.sort(clone.clauses, tqComp);
+    return clone.equals(this) ? this : clone;                             // return original if rewrite+sort changed nothing
   }
 
   // inherit javadoc
Index: lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/MultiTermQuery.java	(revision 1022443)
+++ lucene/src/java/org/apache/lucene/search/MultiTermQuery.java	(working copy)
@@ -19,19 +19,28 @@
 
 import java.io.IOException;
 import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.PriorityQueue;
 
+import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.queryParser.QueryParser; // for javadoc
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeImpl;
-import org.apache.lucene.util.PagedBytes;
+import org.apache.lucene.util.ByteBlockPool;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash;
+import org.apache.lucene.util.FloatUtil;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 
 /**
  * An abstract {@link Query} that matches documents
@@ -177,51 +186,48 @@
   private abstract static class BooleanQueryRewrite extends RewriteMethod {
   
     protected final int collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
-      final Fields fields = MultiFields.getFields(reader);
-      if (fields == null) {
-        // reader has no fields
-        return 0;
-      }
+      final List<IndexReader> subReaders = new ArrayList<IndexReader>();
+      ReaderUtil.gatherSubReaders(subReaders, reader);
+      int count = 0;
+      
+      for (IndexReader r : subReaders) {
+        final Fields fields = r.fields();
+        if (fields == null) {
+          // reader has no fields
+          continue;
+        }
 
-      final Terms terms = fields.terms(query.field);
-      if (terms == null) {
-        // field does not exist
-        return 0;
-      }
+        final Terms terms = fields.terms(query.field);
+        if (terms == null) {
+          // field does not exist
+          continue;
+        }
 
-      final TermsEnum termsEnum = query.getTermsEnum(reader);
-      assert termsEnum != null;
+        final TermsEnum termsEnum = query.getTermsEnum(r);
+        assert termsEnum != null;
 
-      if (termsEnum == TermsEnum.EMPTY)
-        return 0;
-      final BoostAttribute boostAtt =
-        termsEnum.attributes().addAttribute(BoostAttribute.class);
-      collector.boostAtt = boostAtt;
-      int count = 0;
-      BytesRef bytes;
-      while ((bytes = termsEnum.next()) != null) {
-        if (collector.collect(termsEnum, bytes, boostAtt.getBoost())) {
-          termsEnum.cacheCurrentTerm();
-          count++;
-        } else {
-          break;
+        if (termsEnum == TermsEnum.EMPTY)
+          continue;
+        collector.setNextEnum(termsEnum);
+        BytesRef bytes;
+        while ((bytes = termsEnum.next()) != null) {
+          if (collector.collect(bytes)) {
+            termsEnum.cacheCurrentTerm();
+            count++;
+          } else {
+            return count; // interrupt whole term collection, so also don't iterate other subReaders
+          }
         }
       }
-      collector.boostAtt = null;
       return count;
     }
     
-    protected static abstract class TermCollector {
-      private BoostAttribute boostAtt = null;
-    
+    protected static interface TermCollector {
       /** return false to stop collecting */
-      public abstract boolean collect(TermsEnum termsEnum, BytesRef bytes, float boost) throws IOException;
+      boolean collect(BytesRef bytes) throws IOException;
       
-      /** set the minimum boost as a hint for the term producer */
-      protected final void setMaxNonCompetitiveBoost(float maxNonCompetitiveBoost) {
-        assert boostAtt != null;
-        boostAtt.setMaxNonCompetitiveBoost(maxNonCompetitiveBoost);
-      }
+      /** the next segment's {@link TermsEnum} that is used to collect terms */
+      void setNextEnum(TermsEnum termsEnum) throws IOException;
     }
   }
   
@@ -230,16 +236,46 @@
     public Query rewrite(final IndexReader reader, final MultiTermQuery query) throws IOException {
       final BooleanQuery result = new BooleanQuery(true);
       final Term placeholderTerm = new Term(query.field);
-      query.incTotalNumberOfTerms(collectTerms(reader, query, new TermCollector() {
-        @Override
-        public boolean collect(TermsEnum termsEnum, BytesRef bytes, float boost) {
-          // add new TQ, we must clone the term, else it may get overwritten!
-          TermQuery tq = new TermQuery(placeholderTerm.createTerm(new BytesRef(bytes)), termsEnum.docFreq());
-          tq.setBoost(query.getBoost() * boost); // set the boost
-          result.add(tq, BooleanClause.Occur.SHOULD); // add to query
+      final TermFreqBoostByteStart array = new TermFreqBoostByteStart(16);
+      final BytesRefHash terms = new BytesRefHash(new ByteBlockPool(new ByteBlockPool.DirectAllocator()), 16, array);
+      collectTerms(reader, query, new TermCollector() {
+        private TermsEnum termsEnum;
+        private BoostAttribute boostAtt;
+      
+        public void setNextEnum(TermsEnum termsEnum) throws IOException {
+          this.termsEnum = termsEnum;
+          this.boostAtt = termsEnum.attributes().addAttribute(BoostAttribute.class);
+        }
+        
+        public boolean collect(BytesRef bytes) {
+          final int e = terms.add(bytes);
+          if (e < 0 ) {
+            // duplicate term
+            final int pos = (-e)-1;
+            array.docFreq[pos] += termsEnum.docFreq();
+            assert array.boost[pos] == boostAtt.getBoost() : "boost should be equal in all segment TermsEnums";
+          } else {
+            // new entry we populate the entry initially
+            array.docFreq[e] = termsEnum.docFreq();
+            array.boost[e] = boostAtt.getBoost();
+          }
+          // if the new entry reaches the max clause count, we exit early
+          if (e >= BooleanQuery.getMaxClauseCount())
+            throw new BooleanQuery.TooManyClauses();
           return true;
         }
-      }));
+      });
+      
+      final int size = terms.size();
+      final int[] docFreq = array.docFreq;
+      final float[] boost = array.boost;
+      for (int i = 0; i < size; i++) {
+        final Term t = placeholderTerm.createTerm(terms.get(i, new BytesRef()));
+        final TermQuery tq = new TermQuery(t, docFreq[i]);
+        tq.setBoost(query.getBoost() * boost[i]);
+        result.add(tq, BooleanClause.Occur.SHOULD);
+      }
+      query.incTotalNumberOfTerms(terms.size());
       return result;
     }
 
@@ -247,6 +283,42 @@
     protected Object readResolve() {
       return SCORING_BOOLEAN_QUERY_REWRITE;
     }
+    
+    /** Special implementation of BytesStartArray that keeps parallel arrays for boost and docFreq */
+    final static class TermFreqBoostByteStart extends DirectBytesStartArray  {
+      int[] docFreq;
+      float[] boost;
+      
+      public TermFreqBoostByteStart(int initSize) {
+        super(initSize);
+      }
+
+      @Override
+      public int[] init() {
+        final int[] ord = super.init();
+        boost = new float[ArrayUtil.oversize(Math.max(ord.length, initSize),
+            RamUsageEstimator.NUM_BYTES_FLOAT)];
+        docFreq = new int[ArrayUtil.oversize(Math.max(ord.length, initSize),
+            RamUsageEstimator.NUM_BYTES_INT)];
+        return ord;
+      }
+
+      @Override
+      public int[] grow() {
+        final int[] ord = super.grow();
+        docFreq = ArrayUtil.grow(docFreq, ord.length);
+        boost = ArrayUtil.grow(boost, ord.length);
+        return ord;
+      }
+
+      @Override
+      public int[] clear() {
+       boost = null;
+       docFreq = null;
+       return super.clear();
+      }
+      
+    }
   }
 
   /** A rewrite method that first translates each term into
@@ -291,24 +363,55 @@
       final int maxSize = Math.min(size, BooleanQuery.getMaxClauseCount());
       final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<ScoreTerm>();
       collectTerms(reader, query, new TermCollector() {
-        @Override
-        public boolean collect(TermsEnum termsEnum, BytesRef bytes, float boost) {
+        private TermsEnum termsEnum;
+        private BoostAttribute boostAtt;
+        private float boostLie;
+        
+        public void setNextEnum(TermsEnum termsEnum) throws IOException {
+          this.termsEnum = termsEnum;
+          boostAtt = termsEnum.attributes().addAttribute(BoostAttribute.class);
+          float minBoost = (stQueue.size() >= maxSize) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY;
+          boostLie = minBoost;
+          boostAtt.setMaxNonCompetitiveBoost(FloatUtil.nextAfter(minBoost, Double.NEGATIVE_INFINITY));
+        }
+        
+        public boolean collect(BytesRef bytes) {
+          final float boost = boostAtt.getBoost();
           // ignore uncompetetive hits
-          if (stQueue.size() >= maxSize && boost <= stQueue.peek().boost)
+          if (stQueue.size() >= maxSize && boost != boostLie && boost <= stQueue.peek().boost)
             return true;
           // add new entry in PQ, we must clone the term, else it may get overwritten!
           st.bytes.copy(bytes);
           st.boost = boost;
           st.docFreq = termsEnum.docFreq();
-          stQueue.offer(st);
-          // possibly drop entries from queue
-          st = (stQueue.size() > maxSize) ? stQueue.poll() : new ScoreTerm();
-          setMaxNonCompetitiveBoost((stQueue.size() >= maxSize) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY);
+          final ScoreTerm st2 = visitedTerms.get(st.bytes);
+          if (st2 != null) {
+            // if the term is already in the queue, only update docFreq
+            st2.docFreq += st.docFreq;
+            assert st2.boost == st.boost : "boost should be equal in all segment TermsEnums";
+          } else {
+            visitedTerms.put(st.bytes, st);
+            stQueue.offer(st);
+            // possibly drop entries from queue
+            if (stQueue.size() > maxSize) {
+              st = stQueue.poll();
+              visitedTerms.remove(st.bytes);
+            } else {
+              st = new ScoreTerm();
+            }
+            float minBoost = (stQueue.size() >= maxSize) ? stQueue.peek().boost : Float.NEGATIVE_INFINITY;
+            if (minBoost == boostLie) {
+              boostAtt.setMaxNonCompetitiveBoost(FloatUtil.nextAfter(minBoost, Float.NEGATIVE_INFINITY));
+            } else {
+              boostAtt.setMaxNonCompetitiveBoost(minBoost);
+            }
+          }
           return true;
         }
         
         // reusable instance
         private ScoreTerm st = new ScoreTerm();
+        private final Map<BytesRef,ScoreTerm> visitedTerms = new HashMap<BytesRef,ScoreTerm>();
       });
       
       final Term placeholderTerm = new Term(query.field);
@@ -341,7 +444,7 @@
       return true;
     }
   
-    private static class ScoreTerm implements Comparable<ScoreTerm> {
+    private static final class ScoreTerm implements Comparable<ScoreTerm> {
       public final BytesRef bytes = new BytesRef();
       public float boost;
       public int docFreq;
@@ -510,63 +613,58 @@
       final int docCountCutoff = (int) ((docCountPercent / 100.) * reader.maxDoc());
       final int termCountLimit = Math.min(BooleanQuery.getMaxClauseCount(), termCountCutoff);
 
-      final CutOffTermCollector col = new CutOffTermCollector(reader, query.field, docCountCutoff, termCountLimit);
+      final CutOffTermCollector col = new CutOffTermCollector(docCountCutoff, termCountLimit);
       collectTerms(reader, query, col);
-      
+      final int size;
       if (col.hasCutOff) {
         return CONSTANT_SCORE_FILTER_REWRITE.rewrite(reader, query);
-      } else if (col.termCount == 0) {
+      } else if ((size = col.pendingTerms.size()) == 0) {
         return new BooleanQuery(true);
       } else {
-        final PagedBytes.Reader bytesReader = col.pendingTerms.freeze(false);
-        try {
-          final BooleanQuery bq = new BooleanQuery(true);
-          final Term placeholderTerm = new Term(query.field);
-          long start = col.startOffset;
-          for(int i = 0; i < col.termCount; i++) {
-            final BytesRef bytes = new BytesRef();
-            start = bytesReader.fillUsingLengthPrefix3(bytes, start);
-            bq.add(new TermQuery(placeholderTerm.createTerm(bytes)), BooleanClause.Occur.SHOULD);
-          }
-          // Strip scores
-          final Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
-          result.setBoost(query.getBoost());
-          query.incTotalNumberOfTerms(col.termCount);
-          return result;
-        } finally {
-          bytesReader.close();
+        final BooleanQuery bq = new BooleanQuery(true);
+        final Term placeholderTerm = new Term(query.field);
+        final BytesRefHash pendingTerms = col.pendingTerms;
+        for(int i = 0; i < size; i++) {
+          final BytesRef bytes = new BytesRef();
+          pendingTerms.get(i, bytes);
+          // docFreq is not used for constant score here, we pass 1
+          // to explicitly set a fake value, so it's not calculated
+          bq.add(new TermQuery(placeholderTerm.createTerm(bytes), 1), BooleanClause.Occur.SHOULD);
         }
+        // Strip scores
+        final Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
+        result.setBoost(query.getBoost());
+        query.incTotalNumberOfTerms(size);
+        return result;
       }
     }
     
-    private static final class CutOffTermCollector extends TermCollector {
-      CutOffTermCollector(IndexReader reader, String field, int docCountCutoff, int termCountLimit) {
-        this.reader = reader;
-        this.field = field;
+    private static final class CutOffTermCollector implements TermCollector {
+      CutOffTermCollector(int docCountCutoff, int termCountLimit) {
         this.docCountCutoff = docCountCutoff;
         this.termCountLimit = termCountLimit;
       }
     
-      public boolean collect(TermsEnum termsEnum, BytesRef bytes, float boost) throws IOException {
-        termCount++;
-        if (termCount >= termCountLimit || docVisitCount >= docCountCutoff) {
+      public void setNextEnum(TermsEnum termsEnum) throws IOException {
+        this.termsEnum = termsEnum;
+      }
+        
+      public boolean collect(BytesRef bytes) throws IOException {
+        if (pendingTerms.size() >= termCountLimit || docVisitCount >= docCountCutoff) {
           hasCutOff = true;
           return false;
         }
-        pendingTerms.copyUsingLengthPrefix(bytes);
+        pendingTerms.add(bytes);
         docVisitCount += termsEnum.docFreq();
         return true;
       }
       
       int docVisitCount = 0;
       boolean hasCutOff = false;
-      int termCount = 0;
-      
-      final IndexReader reader;
-      final String field;
+      TermsEnum termsEnum;
+
       final int docCountCutoff, termCountLimit;
-      final PagedBytes pendingTerms = new PagedBytes(15); // max term size is 32 KiB
-      final long startOffset = pendingTerms.getPointer();
+      final BytesRefHash pendingTerms = new BytesRefHash();
     }
 
     @Override
Index: lucene/src/java/org/apache/lucene/util/ArrayUtil.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/ArrayUtil.java	(revision 1022443)
+++ lucene/src/java/org/apache/lucene/util/ArrayUtil.java	(working copy)
@@ -247,7 +247,20 @@
   public static short[] grow(short[] array) {
     return grow(array, 1 + array.length);
   }
+  
+  public static float[] grow(float[] array, int minSize) {
+    if (array.length < minSize) {
+      float[] newArray = new float[oversize(minSize, RamUsageEstimator.NUM_BYTES_FLOAT)];
+      System.arraycopy(array, 0, newArray, 0, array.length);
+      return newArray;
+    } else
+      return array;
+  }
 
+  public static float[] grow(float[] array) {
+    return grow(array, 1 + array.length);
+  }
+
   public static short[] shrink(short[] array, int targetSize) {
     final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_SHORT);
     if (newSize != array.length) {
Index: lucene/src/java/org/apache/lucene/util/ByteBlockPool.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/ByteBlockPool.java	(revision 1022443)
+++ lucene/src/java/org/apache/lucene/util/ByteBlockPool.java	(working copy)
@@ -16,8 +16,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import java.util.Arrays;
+import java.util.List;
+import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 
-/* Class that Posting and PostingVector use to write byte
+/** 
+ * Class that Posting and PostingVector use to write byte
  * streams into shared fixed-size byte[] arrays.  The idea
  * is to allocate slices of increasing lengths For
  * example, the first slice is 5 bytes, the next slice is
@@ -31,14 +35,10 @@
  * the end with a non-zero byte.  This way the methods
  * that are writing into the slice don't need to record
  * its length and instead allocate a new slice once they
- * hit a non-zero byte. */
-
-import java.util.Arrays;
-
-
-import java.util.List;
-import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
-
+ * hit a non-zero byte. 
+ * 
+ * @lucene.internal
+ **/
 public final class ByteBlockPool {
   public final static int BYTE_BLOCK_SHIFT = 15;
   public final static int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;
@@ -62,7 +62,23 @@
       return new byte[blockSize];
     }
   }
+  
+  public static final class DirectAllocator extends Allocator {
+    
+    public DirectAllocator() {
+      this(BYTE_BLOCK_SIZE);
+    }
 
+    public DirectAllocator(int blockSize) {
+      super(blockSize);
+    }
+
+    @Override
+    public void recycleByteBlocks(byte[][] blocks, int start, int end) {
+    }
+    
+  }
+
   public byte[][] buffers = new byte[10][];
 
   int bufferUpto = -1;                        // Which buffer we are upto
Index: lucene/src/java/org/apache/lucene/util/BytesRefHash.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/BytesRefHash.java	(revision 1022453)
+++ lucene/src/java/org/apache/lucene/util/BytesRefHash.java	(working copy)
@@ -17,13 +17,15 @@
  * limitations under the License.
  */
 
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SHIFT;
+import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
+
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.concurrent.atomic.AtomicLong;
 
-import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
-import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
-import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SHIFT;
+import org.apache.lucene.util.ByteBlockPool.DirectAllocator;
 
 /**
  * {@link BytesRefHash} is a special purpose hash-map like data-structure
@@ -54,6 +56,14 @@
   public static final int DEFAULT_CAPACITY = 16;
   private final BytesStartArray bytesStartArray;
   private AtomicLong bytesUsed;
+  
+  /**
+   * Creates a new {@link BytesRefHash} with a {@link ByteBlockPool} using a
+   * {@link DirectAllocator}.
+   */
+  public BytesRefHash() { 
+    this(new ByteBlockPool(new DirectAllocator()));
+  }
 
   /**
    * Creates a new {@link BytesRefHash}
@@ -75,7 +85,7 @@
     Arrays.fill(ords, -1);
     this.bytesStartArray = bytesStartArray;
     bytesStart = bytesStartArray.init();
-    bytesUsed = bytesStartArray.bytesUsed();
+    bytesUsed = bytesStartArray.bytesUsed() == null ? new AtomicLong(0) : bytesStartArray.bytesUsed();
     bytesUsed.addAndGet(hashSize * RamUsageEstimator.NUM_BYTES_INT);
   }
 
@@ -536,13 +546,13 @@
     public abstract AtomicLong bytesUsed();
   }
 
-  static class DirectBytesStartArray extends BytesStartArray {
+  public static class DirectBytesStartArray extends BytesStartArray {
 
-    private final int initSize;
+    protected final int initSize;
     private int[] bytesStart;
     private final AtomicLong bytesUsed = new AtomicLong(0);
 
-    DirectBytesStartArray(int initSize) {
+    public DirectBytesStartArray(int initSize) {
       this.initSize = initSize;
     }
 
Index: lucene/src/java/org/apache/lucene/util/FloatUtil.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/FloatUtil.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/util/FloatUtil.java	(revision 0)
@@ -0,0 +1,75 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class FloatUtil {
+  /**
+   * Answers a float next to the first given float value in the direction of
+   * the second given double value.
+   * 
+   * @param start
+   *            the float value to start
+   * @param direction
+   *            the double indicating the direction
+   * @return a float next to the first given float value in the direction of
+   *         the second given double.
+   *         
+   * @since 1.6
+   */
+  @SuppressWarnings("boxing")
+  public static float nextAfter(float start, double direction) {
+    /* this implementation is from apache harmony's java 6 branch (StrictMath) */
+      if (Float.isNaN(start) || Double.isNaN(direction)) {
+          return Float.NaN;
+      }
+      if (0 == start && 0 == direction) {
+          return (float)direction;
+      }
+      if ((start == Float.MIN_VALUE && direction < start)
+              || (start == -Float.MIN_VALUE && direction > start)) {
+          return (start > 0 ? 0f : -0f);
+      }
+      if (Float.isInfinite(start) && (direction != start)) {
+          return (start > 0 ? Float.MAX_VALUE : -Float.MAX_VALUE);
+      }
+      if ((start == Float.MAX_VALUE && direction > start)
+              || (start == -Float.MAX_VALUE && direction < start)) {
+          return (start > 0 ? Float.POSITIVE_INFINITY
+                  : Float.NEGATIVE_INFINITY);
+      }
+      if (direction > start) {
+          if (start > 0) {
+              return Float.intBitsToFloat(Float.floatToIntBits(start) + 1);
+          }
+          if (start < 0) {
+              return Float.intBitsToFloat(Float.floatToIntBits(start) - 1);
+          }
+          return +Float.MIN_VALUE;
+      }
+      if (direction < start) {
+          if (start > 0) {
+              return Float.intBitsToFloat(Float.floatToIntBits(start) - 1);
+          }
+          if (start < 0) {
+              return Float.intBitsToFloat(Float.floatToIntBits(start) + 1);
+          }
+          return -Float.MIN_VALUE;
+      }
+      return (float)direction;
+  }
+}

Property changes on: lucene/src/java/org/apache/lucene/util/FloatUtil.java
___________________________________________________________________
Added: svn:keywords
   + Date Author Id Revision HeadURL
Added: svn:eol-style
   + native

