Index: src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java	(revision 0)
@@ -0,0 +1,120 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** A Key for identifying a cacheable byte[] (one parsed value per document). */
+public class ByteArrayCacheKey extends CacheKey {
+
+  /** 
+   * Interface to parse bytes from document fields.
+   */
+  public interface ByteParser {
+    /** Return a byte representation of this field's value. */
+    public byte parseByte(String string);
+  }
+
+  private static final ByteParser DEFAULT_PARSER = new ByteParser() {
+      public byte parseByte(String value) {
+        return Byte.parseByte(value);
+      }
+    };
+
+  String field;
+  ByteParser parser = DEFAULT_PARSER;
+    
+  public ByteArrayCacheKey(String f, ByteParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+  public ByteArrayCacheKey(String f) {
+    field = f.intern(); // intern: getByteArray compares against (interned) Term.field() with ==
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final ByteArrayCacheKey other = (ByteArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getByteArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    byte[] results = new byte[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      byte[] src = (byte[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+  
+  protected byte[] getByteArray(IndexReader reader) throws IOException {
+    
+    final byte[] retArray = new byte[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        byte termval = parser.parseByte(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java	(revision 0)
@@ -0,0 +1,62 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** A Key for identifying cacheable per-document shorts, exposed as FieldValues. */
+public class ShortFieldCacheKey extends ShortArrayCacheKey {
+
+  public ShortFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  public ShortFieldCacheKey(String f, ShortParser p) {
+    super(f, p);
+  }
+  
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final short[] retArray = getShortArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Short(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Short(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent array key
+  }
+  
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java	(revision 0)
@@ -0,0 +1,61 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** A Key for identifying cacheable per-document longs, exposed as FieldValues. */
+public class LongFieldCacheKey extends LongArrayCacheKey {
+
+  public LongFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public LongFieldCacheKey(String f, LongParser p) {
+    super(f, p);
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final long[] retArray = getLongArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Long(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Long(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent array key
+  }
+
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+}
Index: src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java	(revision 0)
@@ -0,0 +1,124 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable floats.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class FloatArrayCacheKey extends CacheKey {
+
+  /** 
+   * Interface to parse floats from document fields.
+   */
+  public interface FloatParser {
+    /** Return a float representation of this field's value. */
+    public float parseFloat(String string);
+  }
+
+  private static final FloatParser DEFAULT_PARSER = new FloatParser() {
+      public float parseFloat(String value) {
+        return Float.parseFloat(value);
+      }
+    };
+
+  String field;
+  FloatParser parser = DEFAULT_PARSER;
+    
+  public FloatArrayCacheKey(String f, FloatParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+  public FloatArrayCacheKey(String f) {
+    field = f.intern(); // intern: getFloatArray compares against (interned) Term.field() with ==
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final FloatArrayCacheKey other = (FloatArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getFloatArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    float[] results = new float[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      float[] src = (float[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+
+  protected float[] getFloatArray(IndexReader reader) throws IOException {
+    final float[] retArray = new float[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        float termval = parser.parseFloat(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java	(revision 0)
@@ -0,0 +1,99 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable Strings.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringArrayCacheKey extends CacheKey {
+
+  String field;
+    
+  public StringArrayCacheKey(String f) {
+    field = f.intern(); // intern: buildData compares against (interned) Term.field() with ==
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result =  prime + ((field == null) ? 0 : field.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final StringArrayCacheKey other = (StringArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    return true;
+  }
+
+
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+      
+    final String[] retArray = new String[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        String termval = term.text();
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return new CacheData(retArray);
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    String[] results = new String[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      String[] src = (String[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+
+}
Index: src/java/org/apache/lucene/index/cache/Cache.java
===================================================================
--- src/java/org/apache/lucene/index/cache/Cache.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/Cache.java	(revision 0)
@@ -0,0 +1,54 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+/**
+ * A simple Interface for modeling some form of Cache of CacheKey to CacheData.
+ */
+public interface Cache {
+
+  /**
+   * Gets data from the Cache; returns null if the key is not found
+   * in the cache.
+   */
+  public CacheData get(CacheKey key);
+
+  /**
+   * Puts data in the Cache under the specified key.
+   */
+  public void put(CacheKey key, CacheData data);
+  /**
+   * Returns true if the Cache contains data for the specified key.
+   */
+  public boolean containsKey(CacheKey key);
+
+  /**
+   * Called when this Cache will no longer be used, so that
+   * it can free any external resources it may have.
+   */
+  public void close();
+  
+  public static CacheFactory FACTORY = new CacheFactory() { // default factory: a fresh SimpleMapCache, ignoring the reader
+    public Cache getCache(IndexReader r) {
+      return new SimpleMapCache();
+    }
+  };
+
+}
Index: src/java/org/apache/lucene/index/cache/FieldValues.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FieldValues.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FieldValues.java	(revision 0)
@@ -0,0 +1,29 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Per-document access to a cached field: a Comparable ordinal used for
+ * ordering documents, and the field's value itself.
+ */
+public interface FieldValues {
+  /** Returns a Comparable used to order documents by this field. */
+  Comparable getOrd(int doc);
+  /** Returns this field's value for the given document id. */
+  Comparable getValue(int doc);
+}
Index: src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java	(revision 0)
@@ -0,0 +1,63 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** A Key for identifying cacheable per-document bytes, exposed as FieldValues. */
+public class ByteFieldCacheKey extends ByteArrayCacheKey {
+
+  public ByteFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  public ByteFieldCacheKey(String f, ByteParser p) {
+    super(f, p);
+  }
+  
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final byte[] retArray = getByteArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Byte(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Byte(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent array key
+  }
+  
+  // payload is a FieldValues, not a byte[]: the inherited mergeData cannot merge it
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java	(revision 0)
@@ -0,0 +1,128 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable doubles.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class DoubleArrayCacheKey extends CacheKey {
+
+  /**
+   * Interface to parse doubles from document fields.
+   */
+  public interface DoubleParser {
+    /** Return a double representation of this field's value. */
+    public double parseDouble(String string);
+  }
+
+  private static final DoubleParser DEFAULT_PARSER = new DoubleParser() {
+    public double parseDouble(String value) {
+      return Double.parseDouble(value);
+    }
+  };
+
+  String field;
+  DoubleParser parser = DEFAULT_PARSER;
+
+  public DoubleArrayCacheKey(String f, DoubleParser p) {
+    this(f);
+    if (null != p)
+      parser = p;
+  }
+
+  public DoubleArrayCacheKey(String f) {
+    field = f.intern(); // intern: getDoubleArray compares against (interned) Term.field() with ==
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final DoubleArrayCacheKey other = (DoubleArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getDoubleArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) throws UnsupportedOperationException {
+
+    double[] results = new double[starts[starts.length - 1]];
+    for (int i = 0; i < data.length; i++) {
+      double[] src = (double[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() {
+    return true;
+  }
+
+  protected double[] getDoubleArray(IndexReader reader) throws IOException {
+    final double[] retArray = new double[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms(new Term(field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term == null || term.field() != field)
+          break;
+        double termval = parser.parseDouble(term.text());
+        termDocs.seek(termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable per-document floats, exposed as FieldValues.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class FloatFieldCacheKey extends FloatArrayCacheKey {
+
+  public FloatFieldCacheKey(String f, FloatParser p) {
+    super(f,p);
+  }
+
+  public FloatFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final float[] retArray = getFloatArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Float(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Float(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent array key
+  }
+  
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** 
+ * A Key for identifying cacheable Strings, exposed as FieldValues. Caches all
+ * unique Strings as well as lookup indexes (via the parent's StringIndex).
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringFieldCacheKey extends StringIndexCacheKey {
+
+
+  public StringFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent StringIndexCacheKey
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+
+    final StringIndex stringIndex = getStringIndex(reader, field);
+    
+    FieldValues fieldValues = new FieldValues() {
+      
+      public Comparable getOrd(int doc) {
+        return new Integer(stringIndex.order[doc]); // this doc's position in the StringIndex order
+      }
+
+      public Comparable getValue(int doc) {
+        return stringIndex.lookup[stringIndex.order[doc]]; // the String the ordinal maps to
+      }
+      
+    };
+    
+    return new CacheData(fieldValues);
+  }
+  
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+}
Index: src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable per-document doubles, exposed as FieldValues.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class DoubleFieldCacheKey extends DoubleArrayCacheKey {
+
+  public DoubleFieldCacheKey(String f, DoubleParser p) {
+    super(f,p);
+  }
+
+  public DoubleFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final double[] retArray = getDoubleArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Double(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Double(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31; // keep this key distinct from the parent array key
+  }
+  
+  public boolean isMergable() { return false; } // payload is FieldValues, not double[]: inherited mergeData cannot merge it
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/SimpleMapCache.java
===================================================================
--- src/java/org/apache/lucene/index/cache/SimpleMapCache.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/SimpleMapCache.java	(revision 0)
@@ -0,0 +1,55 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+
+
+/**
+ * A simple Map based Cache; all access is synchronized on this instance.
+ *
+ */
+public class SimpleMapCache implements Cache {
+
+  private final Map data = new HashMap(); // guarded by synchronized methods
+  
+  public SimpleMapCache() { /* NOOP */ }
+
+  public synchronized void close() { /* NOOP */ }
+
+  public synchronized void put(CacheKey k, CacheData v) {
+    data.put(k, v);
+  }
+
+  public synchronized boolean containsKey(CacheKey key) {
+    return data.containsKey(key);
+  }
+  public synchronized Set keySet() { // not declared in Cache; read-only view of the cached keys
+    return Collections.unmodifiableSet(data.keySet());
+  }
+
+  public synchronized CacheData get(CacheKey key) {
+    return (CacheData) data.get(key);
+  }
+ 
+}
Index: src/java/org/apache/lucene/index/cache/CacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheKey.java	(revision 0)
@@ -0,0 +1,132 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable data.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public abstract class CacheKey {
+
+  /** CacheKeys are used as hash-map keys, so value equality is required. */
+  public abstract boolean equals(Object o);
+
+  /** Must be consistent with {@link #equals}. */
+  public abstract int hashCode();
+
+  /**
+   * Builds up the CacheData associated with this Key for the 
+   * specified IndexReader.
+   */
+  public abstract CacheData buildData(IndexReader r) throws IOException;
+
+  /**
+   * Merges the CacheData returned by buildData for various IndexReaders 
+   * such that the result is the same as if buildData had been called on a 
+   * MultiReader wrapping those IndexReaders.
+   *
+   * @param starts from a MultiReader, n+1 elements, where the first n are the starting offsets of each IndexReader and the last element is the total maxDoc.
+   * @param data n elements resulting from n calls to buildData
+   * @exception UnsupportedOperationException unless isMergable returns true.
+   */
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+
+    throw new UnsupportedOperationException
+      ("data from this CacheKey cannot be merged");
+  }
+
+  /**
+   * Returns true if mergeData is a supported method for this CacheKey
+   *
+   * @see #mergeData
+   */
+  public boolean isMergable() {
+    return false;
+  }
+  
+  /**
+   * Returns true if the CacheData built by this key wraps a FieldValues 
+   * accessor rather than a raw value array.
+   */
+  public boolean usesFieldValues() {
+    return false;
+  }
+  
+  /** The pattern used to detect integer values in a field */
+  /** removed for java 1.3 compatibility
+   protected static final Pattern pIntegers = Pattern.compile ("[0-9\\-]+");
+   **/
+
+  /** The pattern used to detect float values in a field */
+  /**
+   * removed for java 1.3 compatibility
+   * protected static final Object pFloats = Pattern.compile ("[0-9+\\-\\.eEfFdD]+");
+   */
+  
+  /**
+   * Inspects the first term of the field to guess its type, and returns a 
+   * suitable field-oriented CacheKey (int, long, float, or String).
+   *
+   * @throws RuntimeException if the field has no terms or does not appear 
+   *         to be indexed.
+   */
+  public static CacheKey getAutoCacheKey(IndexReader reader, String field) throws IOException {
+    // intern so the identity comparison against Term.field() below is valid
+    field = field.intern();
+    TermEnum enumerator = reader.terms (new Term (field, ""));
+    try {
+      Term term = enumerator.term();
+      if (term == null) {
+        throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
+      }
+      CacheKey ret = null;
+      if (term.field() == field) {
+        String termtext = term.text().trim();
+
+        // Probe the sample term with successively wider numeric parsers,
+        // falling back to String when none match.  (Java 1.3 compatible
+        // replacement for regex-based type detection.)
+        try {
+          Integer.parseInt (termtext);
+          ret = new IntFieldCacheKey(field);
+        } catch (NumberFormatException nfe1) {
+          try {
+            Long.parseLong(termtext);
+            ret = new LongFieldCacheKey(field);
+          } catch (NumberFormatException nfe2) {
+            try {
+              Float.parseFloat (termtext);
+              ret = new FloatFieldCacheKey(field);
+            } catch (NumberFormatException nfe3) {
+              ret = new StringFieldCacheKey(field);
+            }
+          }
+        }
+      } else {
+        throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
+      }
+      return ret;
+    } finally {
+      enumerator.close();
+    }
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java	(revision 0)
@@ -0,0 +1,126 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable ints.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class IntArrayCacheKey extends CacheKey {
+
+  /**
+   * Interface to parse ints from document fields.
+   */
+  public interface IntParser {
+    /** Return an integer representation of this field's value. */
+    public int parseInt(String string);
+  }
+
+  private static final IntParser DEFAULT_PARSER = new IntParser() {
+    public int parseInt(String value) {
+      return Integer.parseInt(value);
+    }
+  };
+
+  String field;
+  IntParser parser = DEFAULT_PARSER;
+
+  public IntArrayCacheKey(String f, IntParser p) {
+    this(f);
+    if (null != p)
+      parser = p;
+  }
+
+  public IntArrayCacheKey(String f) {
+    // intern so getIntArray's identity comparison against the interned
+    // Term.field() works even when the caller passes a non-interned String
+    field = (null == f) ? null : f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result =  prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the equals contract (previously threw NPE)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final IntArrayCacheKey other = (IntArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getIntArray(reader));
+  }
+
+  /** mergeData is supported: int[] segments concatenate by doc offset. */
+  public boolean isMergable() { return true; }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) throws UnsupportedOperationException {
+    int[] results = new int[starts[starts.length - 1]];
+    for (int i = 0; i < data.length; i++) {
+      int[] src = (int[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+  
+  /**
+   * Walks every term of the field, recording the parsed value for each 
+   * document containing that term.  Docs with no term are left at 0.
+   */
+  protected int[] getIntArray(IndexReader reader) throws IOException {
+    final int[] retArray = new int[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms(new Term(field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        // Term.field() is interned, so identity comparison is safe here
+        if (term == null || term.field() != field)
+          break;
+        int termval = parser.parseInt(term.text());
+        termDocs.seek(termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java	(revision 0)
@@ -0,0 +1,109 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+/** 
+ * A Key for identifying cacheable longs.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class LongArrayCacheKey extends CacheKey {
+
+  private static final LongParser DEFAULT_PARSER = new LongParser() {
+      public long parseLong(String value) {
+        return Long.parseLong(value);
+      }
+    };
+
+  String field;
+
+  LongParser parser = DEFAULT_PARSER;
+
+  public LongArrayCacheKey(String f) {
+    // intern so getLongArray's identity comparison against the interned
+    // Term.field() works even when the caller passes a non-interned String
+    field = (null == f) ? null : f.intern();
+  }
+    
+  public LongArrayCacheKey(String f, LongParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getLongArray(reader));
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the equals contract (previously threw NPE)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final LongArrayCacheKey other = (LongArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+
+  /**
+   * Walks every term of the field, recording the parsed value for each 
+   * document containing that term.  Docs with no term are left at 0.
+   */
+  protected long[] getLongArray(IndexReader reader) throws IOException {
+
+    final long[] retArray = new long[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        // Term.field() is interned, so identity comparison is safe here
+        if (term==null || term.field() != field) break;
+        long termval = parser.parseLong(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+  
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  /** mergeData is supported: long[] segments concatenate by doc offset. */
+  public boolean isMergable() { return true; }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    long[] results = new long[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      long[] src = (long[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  /** 
+   * Interface to parse longs from document fields.
+   */
+  public interface LongParser {
+    /** Return a long representation of this field's value. */
+    public long parseLong(String string);
+  }
+  
+}
Index: src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java	(revision 0)
@@ -0,0 +1,67 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/**
+ * A Key for identifying cacheable ints.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class IntFieldCacheKey extends IntArrayCacheKey {
+
+
+  public IntFieldCacheKey(String f, IntParser p) {
+      super(f,p);
+  }
+
+  public IntFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  /**
+   * Unlike IntArrayCacheKey, wraps the cached int[] in a FieldValues 
+   * accessor instead of exposing the raw array.
+   */
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final int[] retArray = getIntArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      // NOTE(review): returns the field value itself, not a rank/ordinal
+      // — confirm this satisfies the FieldValues.getOrd contract
+      public Comparable getOrd(int doc) {
+        return new Integer(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Integer(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  
+  // distinguish this key from an IntArrayCacheKey on the same field/parser
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  
+  // not mergable: the inherited mergeData expects int[] payloads, but
+  // buildData above stores FieldValues instead
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+  
+}
Index: src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java	(revision 0)
@@ -0,0 +1,107 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+/** 
+ * A Key for identifying cacheable shorts.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class ShortArrayCacheKey extends CacheKey {
+  /** 
+   * Interface to parse shorts from document fields.
+   */
+  public interface ShortParser {
+    /** Return a short representation of this field's value. */
+    public short parseShort(String string);
+  }
+
+  private static final ShortParser DEFAULT_PARSER = new ShortParser() {
+      public short parseShort(String value) {
+        return Short.parseShort(value);
+      }
+    };
+
+  String field;
+  ShortParser parser = DEFAULT_PARSER;
+    
+  public ShortArrayCacheKey(String f, ShortParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+
+  public ShortArrayCacheKey(String f) {
+    // intern so getShortArray's identity comparison against the interned
+    // Term.field() works even when the caller passes a non-interned String
+    field = (null == f) ? null : f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime  + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+  
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the equals contract (previously threw NPE)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final ShortArrayCacheKey other = (ShortArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getShortArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    short[] results = new short[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      short[] src = (short[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  /** mergeData is supported: short[] segments concatenate by doc offset. */
+  public boolean isMergable() { return true; }
+  
+  /**
+   * Walks every term of the field, recording the parsed value for each 
+   * document containing that term.  Docs with no term are left at 0.
+   */
+  protected short[] getShortArray(IndexReader reader) throws IOException {
+    final short[] retArray = new short[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        // Term.field() is interned, so identity comparison is safe here
+        if (term==null || term.field() != field) break;
+        short termval = parser.parseShort(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+  
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/CacheFactory.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheFactory.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheFactory.java	(revision 0)
@@ -0,0 +1,33 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+
+/**
+ * A factory for obtaining the Cache instance to use with a given 
+ * IndexReader.
+ */
+public interface CacheFactory {
+
+  /** Returns a Cache for data derived from the specified reader. */
+  public Cache getCache(IndexReader r) throws IOException;
+
+}
Index: src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java	(revision 0)
@@ -0,0 +1,136 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable Strings. Caches all unique Strings as well 
+ * as lookup indexes.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringIndexCacheKey extends CacheKey {
+
+  /** Pairing of the unique sorted term values and per-doc lookup indexes. */
+  public static class StringIndex {
+
+    /** All the term values, in natural order. */
+    public final String[] lookup;
+
+    /** For each document, an index into the lookup array. */
+    public final int[] order;
+
+    /** Creates one of these objects */
+    public StringIndex (int[] values, String[] lookup) {
+      this.order = values;
+      this.lookup = lookup;
+    }
+  }
+
+  String field;
+    
+  public StringIndexCacheKey(String f) {
+    // intern so getStringIndex's identity comparison against the interned
+    // Term.field() is valid
+    field = f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the equals contract (previously threw NPE)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final StringIndexCacheKey other = (StringIndexCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getStringIndex(reader, field));
+  }
+  
+  /**
+   * Builds the per-doc term index: walks the field's terms in natural 
+   * order, recording for each document the position of its (single) term.
+   * Assumes at most one term per document (i.e. an untokenized field).
+   */
+  protected StringIndex getStringIndex(IndexReader reader, String field) throws IOException {
+    final int[] retArray = new int[reader.maxDoc()];
+    // +1 leaves room for the leading null slot below
+    String[] mterms = new String[reader.maxDoc()+1];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    int t = 0;  // current term number
+    
+    // an entry for documents that have no terms in this field
+    // should a document with no terms be at top or bottom?
+    // this puts them at the top - if it is changed, FieldDocSortedHitQueue
+    // needs to change as well.
+    mterms[t++] = null;
+    
+    try {
+      do {
+        Term term = termEnum.term();
+        // Term.field() is interned, so identity comparison is safe here
+        if (term==null || term.field() != field) break;
+        
+        // store term text
+        // we expect that there is at most one term per document
+        if (t >= mterms.length) 
+          throw new RuntimeException 
+            ("there are more terms than documents in field \"" + field + 
+             "\", but it's impossible to sort on tokenized fields");
+        mterms[t] = term.text();
+        
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = t;
+        }
+        
+        t++;
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    if (t == 0) {
+      // defensive only: t is always >= 1 here because of the leading
+      // null entry added above
+      mterms = new String[1];
+    } else if (t < mterms.length) {
+      // if there are less terms than documents,
+      // trim off the dead array space
+      String[] terms = new String[t];
+      System.arraycopy (mterms, 0, terms, 0, t);
+      mterms = terms;
+    }
+    
+    return new StringIndex(retArray, mterms);
+  }
+  
+  // not mergable: term ordinals are segment-relative and cannot simply
+  // be concatenated
+  public boolean isMergable() { return false; }
+  
+}
Index: src/java/org/apache/lucene/index/cache/CacheData.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheData.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheData.java	(revision 0)
@@ -0,0 +1,31 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * Encapsulates cached data
+ *
+ */
+public class CacheData {
+
+  /** The cached value; final, so a CacheData instance is immutable. */
+  private final Object payload;
+
+  /**
+   * @param payload the data to cache; whatever the producing CacheKey 
+   *        builds (e.g. an int[], a FieldValues, or a StringIndex).
+   */
+  public CacheData(Object payload) {
+    this.payload = payload;
+  }
+
+  /** Returns the wrapped data. */
+  public Object getPayload() { return payload; }
+}
Index: src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ByteArrayCacheKey.java	(revision 0)
@@ -0,0 +1,120 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/* :TODO: javadocs */
+/**
+ * A Key for identifying cacheable bytes.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class ByteArrayCacheKey extends CacheKey {
+
+  /** 
+   * Interface to parse bytes from document fields.
+   */
+  public interface ByteParser {
+    /** Return a byte representation of this field's value. */
+    public byte parseByte(String string);
+  }
+
+  private static final ByteParser DEFAULT_PARSER = new ByteParser() {
+      public byte parseByte(String value) {
+        return Byte.parseByte(value);
+      }
+    };
+
+  String field;
+  ByteParser parser = DEFAULT_PARSER;
+    
+  public ByteArrayCacheKey(String f, ByteParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+
+  public ByteArrayCacheKey(String f) {
+    // intern so getByteArray's identity comparison against the interned
+    // Term.field() works even when the caller passes a non-interned String
+    field = (null == f) ? null : f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the equals contract (previously threw NPE)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final ByteArrayCacheKey other = (ByteArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getByteArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    byte[] results = new byte[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      byte[] src = (byte[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  /** mergeData is supported: byte[] segments concatenate by doc offset. */
+  public boolean isMergable() { return true; }
+  
+  /**
+   * Walks every term of the field, recording the parsed value for each 
+   * document containing that term.  Docs with no term are left at 0.
+   */
+  protected byte[] getByteArray(IndexReader reader) throws IOException {
+    
+    final byte[] retArray = new byte[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        // Term.field() is interned, so identity comparison is safe here
+        if (term==null || term.field() != field) break;
+        byte termval = parser.parseByte(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ByteFieldCacheKey.java	(revision 0)
@@ -0,0 +1,43 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+
+public class ByteFieldCacheKey extends ByteArrayCacheKey {
+
+  public ByteFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  public ByteFieldCacheKey(String f, ByteParser p) {
+    super(f, p);
+  }
+  
+  /**
+   * Unlike ByteArrayCacheKey, wraps the cached byte[] in a FieldValues 
+   * accessor instead of exposing the raw array.
+   */
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final byte[] retArray = getByteArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      // NOTE(review): returns the field value itself, not a rank/ordinal
+      // — confirm this satisfies the FieldValues.getOrd contract
+      public Comparable getOrd(int doc) {
+        return new Byte(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Byte(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  
+  // distinguish this key from a ByteArrayCacheKey on the same field/parser
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  
+  /**
+   * Not mergable: the inherited mergeData casts payloads to byte[], but 
+   * buildData above stores FieldValues — merging would throw a 
+   * ClassCastException.  This override was missing (the superclass 
+   * returns true); IntFieldCacheKey already overrides it the same way.
+   */
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/Cache.java
===================================================================
--- src/java/org/apache/lucene/index/cache/Cache.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/Cache.java	(revision 0)
@@ -0,0 +1,58 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+/**
+ * A simple Interface for modeling some form of Cache
+ */
+public interface Cache {
+
+  /**
+   * Get data from the Cache, returns null if the key is not found 
+   * in the cache.
+   */
+  public CacheData get(CacheKey key);
+
+  /**
+   * Puts data in the Cache.
+   */
+  public void put(CacheKey key, CacheData data);
+
+  /**
+   * Returns true if the Cache contains data for the specified key.
+   */
+  public boolean containsKey(CacheKey key);
+
+  /**
+   * Called when this Cache will no longer be used, so that 
+   * it can free any external resources it may have.
+   */
+  public void close();
+  
+  /**
+   * Default factory: produces a new SimpleMapCache for any IndexReader.
+   */
+  public static CacheFactory FACTORY = new CacheFactory() {
+    public Cache getCache(IndexReader r) {
+      return new SimpleMapCache();
+    }
+  };
+
+}
Index: src/java/org/apache/lucene/index/cache/CacheData.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheData.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheData.java	(revision 0)
@@ -0,0 +1,31 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * Encapsulates a single, immutable payload of cached data, as produced
+ * by a CacheKey's buildData method.
+ */
+public class CacheData {
+  private final Object payload;
+  public CacheData(Object payload) {
+    this.payload = payload;
+  }
+  public Object getPayload() { return payload; }
+}
Index: src/java/org/apache/lucene/index/cache/CacheFactory.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheFactory.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheFactory.java	(revision 0)
@@ -0,0 +1,33 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+
+/**
+ * A factory capable of producing a Cache suitable for a given IndexReader.
+ */
+public interface CacheFactory {
+
+  /** Returns a Cache instance to hold data derived from the specified reader. */
+  public Cache getCache(IndexReader r) throws IOException;
+
+}
Index: src/java/org/apache/lucene/index/cache/CacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/CacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/CacheKey.java	(revision 0)
@@ -0,0 +1,132 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable data.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public abstract class CacheKey {
+
+  public abstract boolean equals(Object o);
+  public abstract int hashCode();
+
+  /**
+   * Builds up the CacheData associated with this Key for the 
+   * specified IndexReader.
+   */
+  public abstract CacheData buildData(IndexReader r) throws IOException;
+
+  /**
+   * Merges the CacheData returned by buildData for various IndexReaders 
+   * such that the result is the same as if buildData had been called on a 
+   * MultiReader wrapping those IndexReaders.
+   *
+   * @param starts from a MultiReader, n+1 elements, where the first n are the starting offsets of each IndexReader and the last element is the total maxDoc.
+   * @param data n elements resulting from n calls to buildData
+   * @exception UnsupportedOperationException unless isMergable returns true.
+   */
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+
+    throw new UnsupportedOperationException
+      ("data from this CacheKey cannot be merged");
+  }
+
+  /**
+   * Returns true if mergeData is a supported method for this CacheKey
+   *
+   * @see #mergeData
+   */
+  public boolean isMergable() {
+    return false;
+  }
+  
+  public boolean usesFieldValues() {
+    return false;
+  }
+  
+  /** The pattern used to detect integer values in a field */
+  /** removed for java 1.3 compatibility
+   protected static final Pattern pIntegers = Pattern.compile ("[0-9\\-]+");
+   **/
+
+  /** The pattern used to detect float values in a field */
+  /**
+   * removed for java 1.3 compatibility
+   * protected static final Object pFloats = Pattern.compile ("[0-9+\\-\\.eEfFdD]+");
+   */
+  
+  public static CacheKey getAutoCacheKey(IndexReader reader, String field) throws IOException {
+    field = field.intern(); // interned so the term.field() == field identity test below is valid
+    TermEnum enumerator = reader.terms (new Term (field, ""));
+    try {
+      Term term = enumerator.term();
+      if (term == null) {
+        throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
+      }
+      CacheKey ret = null;
+      if (term.field() == field) {
+        String termtext = term.text().trim();
+
+        /**
+         * Java 1.4 level code:
+
+         if (pIntegers.matcher(termtext).matches())
+         return IntegerSortedHitQueue.comparator (reader, enumerator, field);
+
+         else if (pFloats.matcher(termtext).matches())
+         return FloatSortedHitQueue.comparator (reader, enumerator, field);
+         */
+
+        // Java 1.3 level code:
+        try {
+          Integer.parseInt (termtext);
+          ret = new IntFieldCacheKey(field);
+        } catch (NumberFormatException nfe1) {
+          try {
+            Long.parseLong(termtext);
+            ret = new LongFieldCacheKey(field);
+          } catch (NumberFormatException nfe2) {
+            try {
+              Float.parseFloat (termtext);
+              ret = new FloatFieldCacheKey(field);
+            } catch (NumberFormatException nfe3) {
+              ret = new StringFieldCacheKey(field);
+            }
+          }
+        }
+      } else {
+        throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
+      }
+      return ret;
+    } finally {
+      enumerator.close(); // always release the enum, even when we throw
+    }
+  
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/DoubleArrayCacheKey.java	(revision 0)
@@ -0,0 +1,128 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable doubles.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class DoubleArrayCacheKey extends CacheKey {
+
+  /**
+   * Interface to parse doubles from document fields.
+   */
+  public interface DoubleParser {
+    /** Return a double representation of this field's value. */
+    public double parseDouble(String string);
+  }
+
+  private static final DoubleParser DEFAULT_PARSER = new DoubleParser() {
+    public double parseDouble(String value) {
+      return Double.parseDouble(value);
+    }
+  };
+
+  String field;
+  DoubleParser parser = DEFAULT_PARSER;
+
+  public DoubleArrayCacheKey(String f, DoubleParser p) {
+    this(f);
+    if (null != p)
+      parser = p;
+  }
+
+  public DoubleArrayCacheKey(String f) {
+    field = f;
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the Object.equals contract; previously NPE'd on equals(null)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final DoubleArrayCacheKey other = (DoubleArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getDoubleArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) throws UnsupportedOperationException {
+
+    double[] results = new double[starts[starts.length - 1]];
+    for (int i = 0; i < data.length; i++) {
+      double[] src = (double[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() {
+    return true;
+  }
+
+  protected double[] getDoubleArray(IndexReader reader) throws IOException {
+    final double[] retArray = new double[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms(new Term(field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term == null || term.field() != field)
+          break;
+        double termval = parser.parseDouble(term.text());
+        termDocs.seek(termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/DoubleFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable doubles.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class DoubleFieldCacheKey extends DoubleArrayCacheKey {
+
+  public DoubleFieldCacheKey(String f, DoubleParser p) {
+    super(f,p);
+  }
+
+  public DoubleFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final double[] retArray = getDoubleArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Double(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Double(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  
+  public boolean isMergable() { return false; } // payload is a FieldValues, not a double[] -- inherited mergeData would fail
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/FieldValues.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FieldValues.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FieldValues.java	(revision 0)
@@ -0,0 +1,29 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * Per-document access to a field's values, as cached by a CacheKey.
+ */
+public interface FieldValues {
+  /** A Comparable representing this document's position in the field's ordering. */
+  Comparable getOrd(int doc);
+  /** The field's value for the specified document. */
+  Comparable getValue(int doc);
+}
Index: src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FloatArrayCacheKey.java	(revision 0)
@@ -0,0 +1,124 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable floats.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class FloatArrayCacheKey extends CacheKey {
+
+  /** 
+   * Interface to parse floats from document fields.
+   */
+  public interface FloatParser {
+    /** Return a float representation of this field's value. */
+    public float parseFloat(String string);
+  }
+
+  private static final FloatParser DEFAULT_PARSER = new FloatParser() {
+      public float parseFloat(String value) {
+        return Float.parseFloat(value);
+      }
+    };
+
+  String field;
+  FloatParser parser = DEFAULT_PARSER;
+    
+  public FloatArrayCacheKey(String f, FloatParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+  public FloatArrayCacheKey(String f) {
+    field = f;
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the Object.equals contract; previously NPE'd on equals(null)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final FloatArrayCacheKey other = (FloatArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getFloatArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    float[] results = new float[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      float[] src = (float[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+
+  protected float[] getFloatArray(IndexReader reader) throws IOException {
+    final float[] retArray = new float[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        float termval = parser.parseFloat(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/FloatFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable floats.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class FloatFieldCacheKey extends FloatArrayCacheKey {
+
+  public FloatFieldCacheKey(String f, FloatParser p) {
+    super(f,p);
+  }
+
+  public FloatFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final float[] retArray = getFloatArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Float(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Float(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  
+  public boolean isMergable() { return false; } // payload is a FieldValues, not a float[]
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/IntArrayCacheKey.java	(revision 0)
@@ -0,0 +1,126 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/**
+ * A Key for identifying cacheable ints.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class IntArrayCacheKey extends CacheKey {
+
+  /**
+   * Interface to parse ints from document fields.
+   */
+  public interface IntParser {
+    /** Return an integer representation of this field's value. */
+    public int parseInt(String string);
+  }
+
+  private static final IntParser DEFAULT_PARSER = new IntParser() {
+    public int parseInt(String value) {
+      return Integer.parseInt(value);
+    }
+  };
+
+  String field;
+  IntParser parser = DEFAULT_PARSER;
+
+  public IntArrayCacheKey(String f, IntParser p) {
+    this(f);
+    if (null != p)
+      parser = p;
+  }
+
+  public IntArrayCacheKey(String f) {
+    field = f;
+  }
+
+
+  public int hashCode() {
+    final int prime = 31;
+    int result =  prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    // null check required by the Object.equals contract; previously NPE'd on equals(null)
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final IntArrayCacheKey other = (IntArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getIntArray(reader));
+  }
+
+  public CacheData mergeData(int[] starts, CacheData[] data) throws UnsupportedOperationException {
+    int[] results = new int[starts[starts.length - 1]];
+    for (int i = 0; i < data.length; i++) {
+      int[] src = (int[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+  
+  public boolean isMergable() { return true; } // was missing: mergeData above is unreachable without it
+  
+  protected int[] getIntArray(IndexReader reader) throws IOException {
+    final int[] retArray = new int[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms(new Term(field, ""));
+    // dead 'cnt' counter removed: it was incremented but never read
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term == null || term.field() != field)
+          break;
+        int termval = parser.parseInt(term.text());
+        termDocs.seek(termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/IntFieldCacheKey.java	(revision 0)
@@ -0,0 +1,67 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/**
+ * A Key for identifying cacheable ints.
+ * 
+ * CacheKey instances provide all of the functionality for generating CacheData based on an IndexReader instance.
+ */
+public class IntFieldCacheKey extends IntArrayCacheKey {
+
+
+  public IntFieldCacheKey(String f, IntParser p) {
+    super(f,p);
+  }
+
+  public IntFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final int[] retArray = getIntArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Integer(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Integer(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+  
+}
Index: src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/LongArrayCacheKey.java	(revision 0)
@@ -0,0 +1,109 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+/** 
+ * A Key for identifying cacheable longs.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class LongArrayCacheKey extends CacheKey {
+  // used when no explicit LongParser is supplied
+  private static final LongParser DEFAULT_PARSER = new LongParser() {
+      public long parseLong(String value) {
+        return Long.parseLong(value);
+      }
+    };
+  // interned, so it can be identity-compared against Term.field()
+  String field;
+
+  LongParser parser = DEFAULT_PARSER;
+  public LongArrayCacheKey(String f) {
+    field = f.intern(); // Term.field() is interned; getLongArray compares with !=
+  }
+    
+  public LongArrayCacheKey(String f, LongParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getLongArray(reader));
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final LongArrayCacheKey other = (LongArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  /** Parses every term of the field into a long[] indexed by docid (0 for docs with no term). */
+  protected long[] getLongArray(IndexReader reader) throws IOException {
+
+    final long[] retArray = new long[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        long termval = parser.parseLong(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return retArray;
+  }
+  
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+
+  public boolean isMergable() { return true; }
+  /** Concatenates per-segment long[] payloads into one array using segment starts. */
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    long[] results = new long[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      long[] src = (long[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  /** 
+   * Interface to parse longs from document fields.
+   */
+  public interface LongParser {
+    /** Return a long representation of this field's value. */
+    public long parseLong(String string);
+  }
+  
+}
Index: src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/LongFieldCacheKey.java	(revision 0)
@@ -0,0 +1,43 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+/** A Key for identifying cacheable longs, exposed per-document as {@link FieldValues}. */
+public class LongFieldCacheKey extends LongArrayCacheKey {
+
+  public LongFieldCacheKey(String f) {
+    super(f);
+  }
+
+  public LongFieldCacheKey(String f, LongParser p) {
+    super(f, p);
+  }
+  /** Builds FieldValues that box the cached long for both ordering and display. */
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final long[] retArray = getLongArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Long(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Long(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  // distinct from LongArrayCacheKey's hash so the two key kinds never collide in a cache
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  // FieldValues wrap a reader-relative array, so merged caching is not supported
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+}
Index: src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ShortArrayCacheKey.java	(revision 0)
@@ -0,0 +1,107 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+/** 
+ * A Key for identifying cacheable shorts.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class ShortArrayCacheKey extends CacheKey {
+  /** 
+   * Interface to parse shorts from document fields.
+   */
+  public interface ShortParser {
+    /** Return a short representation of this field's value. */
+    public short parseShort(String string);
+  }
+  // used when no explicit ShortParser is supplied
+  private static final ShortParser DEFAULT_PARSER = new ShortParser() {
+      public short parseShort(String value) {
+        return Short.parseShort(value);
+      }
+    };
+  // interned, so it can be identity-compared against Term.field()
+  String field;
+  ShortParser parser = DEFAULT_PARSER;
+    
+  public ShortArrayCacheKey(String f, ShortParser p) {
+    this(f);
+    if (null != p) parser = p;
+  }
+  public ShortArrayCacheKey(String f) {
+    field = f.intern(); // Term.field() is interned; getShortArray compares with !=
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime  + ((field == null) ? 0 : field.hashCode());
+    result = prime * result + ((parser == null) ? 0 : parser.hashCode());
+    return result;
+  }
+  
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final ShortArrayCacheKey other = (ShortArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    if (parser == null) {
+      if (other.parser != null)
+        return false;
+    } else if (!parser.equals(other.parser))
+      return false;
+    return true;
+  }
+  
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getShortArray(reader));
+  }
+  /** Concatenates per-segment short[] payloads into one array using segment starts. */
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    short[] results = new short[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      short[] src = (short[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+  /** Parses every term of the field into a short[] indexed by docid (0 for docs with no term). */
+  protected short[] getShortArray(IndexReader reader) throws IOException {
+    final short[] retArray = new short[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        short termval = parser.parseShort(term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+  
+    return retArray;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/ShortFieldCacheKey.java	(revision 0)
@@ -0,0 +1,44 @@
+package org.apache.lucene.index.cache;
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+/** A Key for identifying cacheable shorts, exposed per-document as {@link FieldValues}. */
+public class ShortFieldCacheKey extends ShortArrayCacheKey {
+
+  public ShortFieldCacheKey(String f) {
+    super(f);
+  }
+  
+  public ShortFieldCacheKey(String f, ShortParser p) {
+    super(f, p);
+  }
+  /** Builds FieldValues that box the cached short for both ordering and display. */
+  public CacheData buildData(IndexReader reader) throws IOException {
+    final short[] retArray = getShortArray(reader);
+    FieldValues fieldValues = new FieldValues() {
+
+      public Comparable getOrd(int doc) {
+        return new Short(retArray[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return new Short(retArray[doc]);
+      }
+
+    };
+
+    return new CacheData(fieldValues);
+  }
+  // distinct from ShortArrayCacheKey's hash so the two key kinds never collide in a cache
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  // FieldValues wrap a reader-relative array, so merged caching is not supported
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+
+}
Index: src/java/org/apache/lucene/index/cache/SimpleMapCache.java
===================================================================
--- src/java/org/apache/lucene/index/cache/SimpleMapCache.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/SimpleMapCache.java	(revision 0)
@@ -0,0 +1,55 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+
+
+/**
+ * A simple HashMap based Cache.
+ * All operations synchronize on this instance; close() releases nothing.
+ */
+public class SimpleMapCache implements Cache {
+  // key/value store; guarded by "this"
+  private Map data = new HashMap();
+  
+  public SimpleMapCache() { /* NOOP */ }
+
+  public synchronized void close() { /* NOOP */ }
+
+  public synchronized void put(CacheKey k, CacheData v) {
+    data.put(k, v);
+  }
+
+  public synchronized boolean containsKey(CacheKey key) {
+    return data.containsKey(key);
+  }
+  public synchronized Set keySet() {
+    return Collections.unmodifiableSet(data.keySet());
+  }
+  // NOTE(review): keySet's view is backed by the live map; iterating it while another thread puts may fail
+  public synchronized CacheData get(CacheKey key) {
+    return (CacheData) data.get(key);
+  }
+ 
+}
Index: src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringArrayCacheKey.java	(revision 0)
@@ -0,0 +1,99 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable Strings.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringArrayCacheKey extends CacheKey {
+  // interned, so it can be identity-compared against Term.field()
+  String field;
+    
+  public StringArrayCacheKey(String f) {
+    field = f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result =  prime + ((field == null) ? 0 : field.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final StringArrayCacheKey other = (StringArrayCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    return true;
+  }
+
+  /**
+   * Collects every term of the field into a String[] indexed by docid (null for docs with no term).
+   */
+  public CacheData buildData(IndexReader reader) throws IOException {
+      
+    final String[] retArray = new String[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        String termval = term.text();
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+
+    return new CacheData(retArray);
+  }
+  /** Concatenates per-segment String[] payloads into one array using segment starts. */
+  public CacheData mergeData(int[] starts, CacheData[] data) 
+    throws UnsupportedOperationException {
+    
+    String[] results = new String[starts[starts.length-1]];
+    for (int i = 0; i < data.length; i++) {
+      String[] src = (String[]) data[i].getPayload();
+      System.arraycopy(src, 0, results, starts[i], src.length);
+    }
+    return new CacheData(results);
+  }
+
+  public boolean isMergable() { return true; }
+
+}
Index: src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringFieldCacheKey.java	(revision 0)
@@ -0,0 +1,66 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+
+/** 
+ * A Key for identifying cacheable Strings. Caches all unique Strings as well 
+ * as lookup indexes.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringFieldCacheKey extends StringIndexCacheKey {
+
+  // equality is inherited from StringIndexCacheKey (field-based, class-checked)
+  public StringFieldCacheKey(String f) {
+    super(f);
+  }
+  // distinct from StringIndexCacheKey's hash so the two key kinds never collide in a cache
+  public int hashCode() {
+    return super.hashCode() * 31;
+  }
+  /** Builds FieldValues over the StringIndex: getOrd boxes the sort ordinal, getValue looks up the term text. */
+  public CacheData buildData(IndexReader reader) throws IOException {
+
+    final StringIndex stringIndex = getStringIndex(reader, field);
+    
+    FieldValues fieldValues = new FieldValues() {
+      
+      public Comparable getOrd(int doc) {
+        return new Integer(stringIndex.order[doc]);
+      }
+
+      public Comparable getValue(int doc) {
+        return stringIndex.lookup[stringIndex.order[doc]];
+      }
+      
+    };
+    
+    return new CacheData(fieldValues);
+  }
+  // FieldValues wrap a reader-relative index, so merged caching is not supported
+  public boolean isMergable() { return false; }
+  
+  public boolean usesFieldValues() {
+    return true;
+  }
+}
Index: src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java
===================================================================
--- src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/index/cache/StringIndexCacheKey.java	(revision 0)
@@ -0,0 +1,136 @@
+package org.apache.lucene.index.cache;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import java.io.IOException;
+
+/** 
+ * A Key for identifying cacheable Strings. Caches all unique Strings as well 
+ * as lookup indexes.
+ *
+ * CacheKey instances provide all of the functionality for generating 
+ * CacheData based on an IndexReader instance.
+ */
+public class StringIndexCacheKey extends CacheKey {
+
+  public static class StringIndex {
+
+    /** All the term values, in natural order. */
+    public final String[] lookup;
+
+    /** For each document, an index into the lookup array. */
+    public final int[] order;
+
+    /** Creates one of these objects */
+    public StringIndex (int[] values, String[] lookup) {
+      this.order = values;
+      this.lookup = lookup;
+    }
+  }
+  // interned, so it can be identity-compared against Term.field()
+  String field;
+    
+  public StringIndexCacheKey(String f) {
+    field = f.intern();
+  }
+
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((field == null) ? 0 : field.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final StringIndexCacheKey other = (StringIndexCacheKey) obj;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+    return new CacheData(getStringIndex(reader, field));
+  }
+  /** Builds the ordinal array and unique-term lookup; ord 0 is reserved for docs with no term. */
+  protected StringIndex getStringIndex(IndexReader reader, String field) throws IOException {
+    final int[] retArray = new int[reader.maxDoc()];
+    String[] mterms = new String[reader.maxDoc()+1];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    int t = 0;  // current term number
+    
+    // an entry for documents that have no terms in this field
+    // should a document with no terms be at top or bottom?
+    // this puts them at the top - if it is changed, FieldDocSortedHitQueue
+    // needs to change as well.
+    mterms[t++] = null;
+    
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        
+        // store term text
+        // we expect that there is at most one term per document
+        if (t >= mterms.length) 
+          throw new RuntimeException 
+            ("there are more terms than documents in field \"" + field + 
+             "\", but it's impossible to sort on tokenized fields");
+        mterms[t] = term.text();
+        
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = t;
+        }
+        
+        t++;
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    
+    if (t == 0) {
+      // if there are no terms, make the term array
+      // have a single null entry
+      mterms = new String[1];
+    } else if (t < mterms.length) {
+      // if there are less terms than documents,
+      // trim off the dead array space
+      String[] terms = new String[t];
+      System.arraycopy (mterms, 0, terms, 0, t);
+      mterms = terms;
+    }
+    
+    return new StringIndex(retArray, mterms);
+  }
+  
+  public boolean isMergable() { return false; }
+  
+}
Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryIndexReader.java	(revision 642339)
+++ src/java/org/apache/lucene/index/DirectoryIndexReader.java	(working copy)
@@ -24,6 +24,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.lucene.index.cache.Cache;
+import org.apache.lucene.index.cache.SimpleMapCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
@@ -93,6 +95,7 @@
           reader = new MultiSegmentReader(directory, infos, closeDirectory);
         }
         reader.setDeletionPolicy(deletionPolicy);
+        reader.useCacheFactory(Cache.FACTORY);
         return reader;
       }
     }.run();
@@ -100,9 +103,9 @@
 
   public final synchronized IndexReader reopen() throws CorruptIndexException, IOException {
     ensureOpen();
-
     if (this.hasChanges || this.isCurrent()) {
       // the index hasn't changed - nothing to do here
+
       return this;
     }
 
@@ -118,7 +121,7 @@
           newReader.init(directory, infos, closeDirectory);
           newReader.deletionPolicy = deletionPolicy;
         }
-
+        newReader.useCacheFactory(Cache.FACTORY);
         return newReader;
       }
     }.run();
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java	(revision 642339)
+++ src/java/org/apache/lucene/index/IndexReader.java	(working copy)
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.cache.*;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.search.Similarity;
@@ -93,6 +94,7 @@
 
   private boolean closed;
   protected boolean hasChanges;
+  private Cache cache;
   
   private volatile int refCount;
   
@@ -149,10 +151,24 @@
   protected IndexReader(Directory directory) {
     this();
     this.directory = directory;
+    //TODO: find best set points
+    try {
+      cache = Cache.FACTORY.getCache(this);
+    } catch (IOException e) {
+      // TODO Auto-generated catch block
+      e.printStackTrace();
+    }
   }
   
-  protected IndexReader() { 
+  protected IndexReader() {
     refCount = 1;
+    //TODO: find best set points
+    try {
+      cache = Cache.FACTORY.getCache(this);
+    } catch (IOException e) {
+      // TODO: how to deal with this?
+      e.printStackTrace();
+    }
   }
   
   /**
@@ -783,7 +799,57 @@
     hasChanges = true;
     doUndeleteAll();
   }
+  
+  /** 
+   * Use this CacheFactory to generate a Cache for the current IndexReader.
+   * Closes the previous cache first.
+   */
+  public void useCacheFactory(CacheFactory factory) throws IOException {
+    if (null != cache) cache.close();
+    cache = factory.getCache(this);
+  }
 
+  /**
+   * EXPERT: raw access to the cache for introspection
+   */
+  public Cache getCache() {
+    return cache;
+  }
+
+  /**
+   * Generates and caches the data identified by the CacheKey, 
+   * if it is not already in the cache.
+   */
+  public CacheData getCachedData(CacheKey key) throws IOException {
+    CacheData value;
+    synchronized (cache) {
+      value = cache.get(key);
+      if (value == null) {
+        value = new CacheData(new CacheCreationPlaceholder());
+        cache.put(key, value);
+      }
+    }
+    Object payload = value.getPayload();
+    if (payload instanceof CacheCreationPlaceholder) {
+      synchronized (payload) {
+        CacheCreationPlaceholder progress = (CacheCreationPlaceholder) payload;
+        if (progress.value == null) {
+            progress.value = key.buildData(this);
+            synchronized (cache) {
+              cache.put(key, progress.value);
+            }
+        }
+        return progress.value;
+      }
+    }
+    return value;
+  }
+
+  private static final class CacheCreationPlaceholder {
+    CacheData value;
+  }
+
+
   /** Implements actual undeleteAll() in subclass. */
   protected abstract void doUndeleteAll() throws CorruptIndexException, IOException;
 
@@ -830,6 +896,7 @@
   public final synchronized void close() throws IOException {
     if (!closed) {
       decRef();
+      if (null != cache) cache.close();
       closed = true;
     }
   }
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java	(revision 642339)
+++ src/java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -27,6 +27,11 @@
 import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
 import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
 import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
+import org.apache.lucene.index.cache.Cache;
+import org.apache.lucene.index.cache.CacheData;
+import org.apache.lucene.index.cache.CacheFactory;
+import org.apache.lucene.index.cache.CacheKey;
+import org.apache.lucene.index.cache.FieldValues;
 
 /** An IndexReader which reads multiple indexes, appending their content.
  *
@@ -85,6 +90,13 @@
         hasDeletions = true;
     }
     starts[subReaders.length] = maxDoc;
+    //TODO: find best spots for this
+    try {
+      useCacheFactory(Cache.FACTORY);
+    } catch (IOException e) {
+      // TODO: decide how to deal with this
+      e.printStackTrace();
+    }
   }
 
   /**
@@ -161,6 +173,19 @@
       }
     }
   }
+  
+  /** 
+   * Use this CacheFactory to generate a Cache for the current IndexReader, 
+   * as well as all of the subReaders.
+   *
+   * Closes the previous cache first.
+   */
+  public void useCacheFactory(CacheFactory factory) throws IOException {
+    super.useCacheFactory(factory);
+    for (int i = 0; i < subReaders.length; i++) {
+      subReaders[i].useCacheFactory(factory);
+    }
+  }
 
   public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
     ensureOpen();
@@ -336,6 +361,48 @@
     }
   }
   
+  /**
+   * Generates and caches the data identified by the CacheKey, if it is not already in the cache.
+   * 
+   * Delegates to subReaders if the CacheKey is mergeable; the merged result is rebuilt on each call.
+   */
+  public CacheData getCachedData(final CacheKey key) throws IOException {
+    if( ! key.usesFieldValues()) {
+      if ( ! key.isMergable() )
+        return super.getCachedData(key);
+      // NOTE(review): the merged result below is NOT stored in this reader's cache
+      CacheData[] data = new CacheData[subReaders.length];
+      for (int i = 0; i < subReaders.length; i++) {
+        data[i] = subReaders[i].getCachedData(key);
+      }
+
+      return key.mergeData(starts, data);
+    }
+    // per-doc values: dispatch each lookup to the owning segment reader
+    FieldValues multiFieldValues = new FieldValues() {
+      CacheData[] data = new CacheData[subReaders.length];
+
+      { // eagerly fetch (and thereby cache) each subReader's data
+        for (int i = 0; i < subReaders.length; i++) {
+          data[i] = subReaders[i].getCachedData(key);
+        }
+      }
+
+      public Comparable getOrd(int doc) {
+        int i = readerIndex(doc); // find segment num
+        return ((FieldValues)data[i].getPayload()).getOrd(doc - starts[i]); // dispatch to segment reader
+      }
+
+      public Comparable getValue(int doc) {
+        int i = readerIndex(doc); // find segment num
+        return ((FieldValues)data[i].getPayload()).getValue(doc - starts[i]); // dispatch to segment reader
+      }
+
+    };
+    
+    return new CacheData(multiFieldValues);
+  }
+  
   public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
     ensureOpen();
     return MultiSegmentReader.getFieldNames(fieldNames, this.subReaders);
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java	(revision 642339)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java	(working copy)
@@ -19,6 +19,9 @@
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.index.cache.CacheData;
+import org.apache.lucene.index.cache.CacheKey;
+import org.apache.lucene.index.cache.FieldValues;
 import org.apache.lucene.store.Directory;
 
 import java.io.IOException;
@@ -30,19 +33,20 @@
 import java.util.Map;
 import java.util.Set;
 
-/** 
+/**
  * An IndexReader which reads indexes with multiple segments.
  */
 class MultiSegmentReader extends DirectoryIndexReader {
   protected SegmentReader[] subReaders;
-  private int[] starts;                           // 1st docno for each segment
+  private int[] starts; // 1st docno for each segment
   private Hashtable normsCache = new Hashtable();
   private int maxDoc = 0;
   private int numDocs = -1;
   private boolean hasDeletions = false;
 
   /** Construct reading the named set of readers. */
-  MultiSegmentReader(Directory directory, SegmentInfos sis, boolean closeDirectory) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos sis, boolean closeDirectory)
+      throws IOException {
     super(directory, sis, closeDirectory);
     // To reduce the chance of hitting FileNotFound
     // (and having to retry), we open segments in
@@ -50,12 +54,12 @@
     // the newest segments first.
 
     SegmentReader[] readers = new SegmentReader[sis.size()];
-    for (int i = sis.size()-1; i >= 0; i--) {
+    for (int i = sis.size() - 1; i >= 0; i--) {
       try {
         readers[i] = SegmentReader.get(sis.info(i));
       } catch (IOException e) {
         // Close all readers we had opened:
-        for(i++;i<sis.size();i++) {
+        for (i++; i < sis.size(); i++) {
           try {
             readers[i].close();
           } catch (IOException ignore) {
@@ -70,9 +74,10 @@
   }
 
   /** This contructor is only used for {@link #reopen()} */
-  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
+  MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory,
+      SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException {
     super(directory, infos, closeDirectory);
-    
+
     // we put the old SegmentReaders in a map, that allows us
     // to lookup a reader using its segment name
     Map segmentReaders = new HashMap();
@@ -83,14 +88,14 @@
         segmentReaders.put(oldReaders[i].getSegmentName(), new Integer(i));
       }
     }
-    
+
     SegmentReader[] newReaders = new SegmentReader[infos.size()];
-    
+
     // remember which readers are shared between the old and the re-opened
     // MultiSegmentReader - we have to incRef those readers
     boolean[] readerShared = new boolean[infos.size()];
-    
-    for (int i = infos.size() - 1; i>=0; i--) {
+
+    for (int i = infos.size() - 1; i >= 0; i--) {
       // find SegmentReader for this segment
       Integer oldReaderIndex = (Integer) segmentReaders.get(infos.info(i).name);
       if (oldReaderIndex == null) {
@@ -104,7 +109,9 @@
       boolean success = false;
       try {
         SegmentReader newReader;
-        if (newReaders[i] == null || infos.info(i).getUseCompoundFile() != newReaders[i].getSegmentInfo().getUseCompoundFile()) {
+        if (newReaders[i] == null
+            || infos.info(i).getUseCompoundFile() != newReaders[i].getSegmentInfo()
+                .getUseCompoundFile()) {
           // this is a new reader; in case we hit an exception we can close it safely
           newReader = SegmentReader.get(infos.info(i));
         } else {
@@ -141,11 +148,11 @@
           }
         }
       }
-    }    
-    
+    }
+
     // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
     initialize(newReaders);
-    
+
     // try to copy unchanged norms from the old normsCache to the new one
     if (oldNormsCache != null) {
       Iterator it = oldNormsCache.keySet().iterator();
@@ -154,38 +161,81 @@
         if (!hasNorms(field)) {
           continue;
         }
-        
+
         byte[] oldBytes = (byte[]) oldNormsCache.get(field);
-  
+
         byte[] bytes = new byte[maxDoc()];
-        
+
         for (int i = 0; i < subReaders.length; i++) {
           Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
 
-          // this SegmentReader was not re-opened, we can copy all of its norms 
-          if (oldReaderIndex != null &&
-               (oldReaders[oldReaderIndex.intValue()] == subReaders[i] 
-                 || oldReaders[oldReaderIndex.intValue()].norms.get(field) == subReaders[i].norms.get(field))) {
+          // this SegmentReader was not re-opened, we can copy all of its norms
+          if (oldReaderIndex != null
+              && (oldReaders[oldReaderIndex.intValue()] == subReaders[i] || oldReaders[oldReaderIndex
+                  .intValue()].norms.get(field) == subReaders[i].norms.get(field))) {
             // we don't have to synchronize here: either this constructor is called from a SegmentReader,
             // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
             // which is synchronized
-            System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i], starts[i+1] - starts[i]);
+            System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i],
+                starts[i + 1] - starts[i]);
           } else {
             subReaders[i].norms(field, bytes, starts[i]);
           }
         }
-        
-        normsCache.put(field, bytes);      // update cache
+
+        normsCache.put(field, bytes); // update cache
       }
     }
   }
 
+  /**
+   * Generates and caches the data identified by the CacheKey, if it is not already in the cache.
+   * 
+   * Delegates to subReaders if the CacheKey is mergeable, and then caches the merged result.
+   */
+  public CacheData getCachedData(final CacheKey key) throws IOException {
+    if( ! key.usesFieldValues()) {
+      if ( ! key.isMergable() )
+        return super.getCachedData(key);
+
+      CacheData[] data = new CacheData[subReaders.length];
+      for (int i = 0; i < subReaders.length; i++) {
+        data[i] = subReaders[i].getCachedData(key);
+      }
+
+      return key.mergeData(starts, data);
+    }
+    
+    FieldValues multiFieldValues = new FieldValues() {
+      CacheData[] data = new CacheData[subReaders.length];
+
+      {
+        for (int i = 0; i < subReaders.length; i++) {
+          data[i] = subReaders[i].getCachedData(key);
+        }
+      }
+
+      public Comparable getOrd(int doc) {
+        int i = readerIndex(doc); // find segment num
+        return ((FieldValues)data[i].getPayload()).getOrd(doc - starts[i]); // dispatch to segment reader
+      }
+
+      public Comparable getValue(int doc) {
+        int i = readerIndex(doc); // find segment num
+        return ((FieldValues)data[i].getPayload()).getValue(doc - starts[i]); // dispatch to segment reader
+      }
+
+    };
+    
+    return new CacheData(multiFieldValues);
+  }
+
   private void initialize(SegmentReader[] subReaders) {
     this.subReaders = subReaders;
-    starts = new int[subReaders.length + 1];    // build starts array
+    starts = new int[subReaders.length + 1]; // build starts array
     for (int i = 0; i < subReaders.length; i++) {
       starts[i] = maxDoc;
-      maxDoc += subReaders[i].maxDoc();      // compute maxDocs
+      maxDoc += subReaders[i].maxDoc(); // compute maxDocs
 
       if (subReaders[i].hasDeletions())
         hasDeletions = true;
@@ -193,53 +243,54 @@
     starts[subReaders.length] = maxDoc;
   }
 
-  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
+  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos)
+      throws CorruptIndexException, IOException {
     if (infos.size() == 1) {
       // The index has only one segment now, so we can't refresh the MultiSegmentReader.
       // Return a new SegmentReader instead
       SegmentReader newReader = SegmentReader.get(infos, infos.info(0), false);
       return newReader;
     } else {
-      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache);
-    }            
+      return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts,
+          normsCache);
+    }
   }
 
   public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
     ensureOpen();
-    int i = readerIndex(n);        // find segment num
+    int i = readerIndex(n); // find segment num
     return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
   }
 
-  public TermFreqVector getTermFreqVector(int n, String field)
-      throws IOException {
+  public TermFreqVector getTermFreqVector(int n, String field) throws IOException {
     ensureOpen();
-    int i = readerIndex(n);        // find segment num
+    int i = readerIndex(n); // find segment num
     return subReaders[i].getTermFreqVector(n - starts[i], field);
   }
 
-
-  public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
+  public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper)
+      throws IOException {
     ensureOpen();
-    int i = readerIndex(docNumber);        // find segment num
+    int i = readerIndex(docNumber); // find segment num
     subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
   }
 
   public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
     ensureOpen();
-    int i = readerIndex(docNumber);        // find segment num
+    int i = readerIndex(docNumber); // find segment num
     subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
   }
 
   public boolean isOptimized() {
     return false;
   }
-  
+
   public synchronized int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
-    if (numDocs == -1) {        // check cache
-      int n = 0;                // cache miss--recompute
+    if (numDocs == -1) { // check cache
+      int n = 0; // cache miss--recompute
       for (int i = 0; i < subReaders.length; i++)
-        n += subReaders[i].numDocs();      // sum from readers
+        n += subReaders[i].numDocs(); // sum from readers
       numDocs = n;
     }
     return numDocs;
@@ -251,16 +302,17 @@
   }
 
   // inherit javadoc
-  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException,
+      IOException {
     ensureOpen();
-    int i = readerIndex(n);                          // find segment num
-    return subReaders[i].document(n - starts[i], fieldSelector);    // dispatch to segment reader
+    int i = readerIndex(n); // find segment num
+    return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
   }
 
   public boolean isDeleted(int n) {
     // Don't call ensureOpen() here (it could affect performance)
-    int i = readerIndex(n);                           // find segment num
-    return subReaders[i].isDeleted(n - starts[i]);    // dispatch to segment reader
+    int i = readerIndex(n); // find segment num
+    return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
   }
 
   public boolean hasDeletions() {
@@ -269,9 +321,9 @@
   }
 
   protected void doDelete(int n) throws CorruptIndexException, IOException {
-    numDocs = -1;                             // invalidate cache
-    int i = readerIndex(n);                   // find segment num
-    subReaders[i].deleteDocument(n - starts[i]);      // dispatch to segment reader
+    numDocs = -1; // invalidate cache
+    int i = readerIndex(n); // find segment num
+    subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
     hasDeletions = true;
   }
 
@@ -280,17 +332,17 @@
       subReaders[i].undeleteAll();
 
     hasDeletions = false;
-    numDocs = -1;                                 // invalidate cache
+    numDocs = -1; // invalidate cache
   }
 
-  private int readerIndex(int n) {    // find reader for doc n:
+  private int readerIndex(int n) { // find reader for doc n:
     return readerIndex(n, this.starts, this.subReaders.length);
   }
-  
-  static int readerIndex(int n, int[] starts, int numSubReaders) {    // find reader for doc n:
-    int lo = 0;                                      // search starts array
-    int hi = numSubReaders - 1;                  // for first element less
 
+  static int readerIndex(int n, int[] starts, int numSubReaders) { // find reader for doc n:
+    int lo = 0; // search starts array
+    int hi = numSubReaders - 1; // for first element less
+
     while (hi >= lo) {
       int mid = (lo + hi) >> 1;
       int midValue = starts[mid];
@@ -298,9 +350,9 @@
         hi = mid - 1;
       else if (n > midValue)
         lo = mid + 1;
-      else {                                      // found a match
-        while (mid+1 < numSubReaders && starts[mid+1] == midValue) {
-          mid++;                                  // scan to last match
+      else { // found a match
+        while (mid + 1 < numSubReaders && starts[mid + 1] == midValue) {
+          mid++; // scan to last match
         }
         return mid;
       }
@@ -311,49 +363,53 @@
   public boolean hasNorms(String field) throws IOException {
     ensureOpen();
     for (int i = 0; i < subReaders.length; i++) {
-      if (subReaders[i].hasNorms(field)) return true;
+      if (subReaders[i].hasNorms(field))
+        return true;
     }
     return false;
   }
 
   private byte[] ones;
+
   private byte[] fakeNorms() {
-    if (ones==null) ones=SegmentReader.createFakeNorms(maxDoc());
+    if (ones == null)
+      ones = SegmentReader.createFakeNorms(maxDoc());
     return ones;
   }
 
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
-    byte[] bytes = (byte[])normsCache.get(field);
+    byte[] bytes = (byte[]) normsCache.get(field);
     if (bytes != null)
-      return bytes;          // cache hit
+      return bytes; // cache hit
     if (!hasNorms(field))
       return fakeNorms();
 
     bytes = new byte[maxDoc()];
     for (int i = 0; i < subReaders.length; i++)
       subReaders[i].norms(field, bytes, starts[i]);
-    normsCache.put(field, bytes);      // update cache
+    normsCache.put(field, bytes); // update cache
     return bytes;
   }
 
-  public synchronized void norms(String field, byte[] result, int offset)
-    throws IOException {
+  public synchronized void norms(String field, byte[] result, int offset) throws IOException {
     ensureOpen();
-    byte[] bytes = (byte[])normsCache.get(field);
-    if (bytes==null && !hasNorms(field)) bytes=fakeNorms();
-    if (bytes != null)                            // cache hit
+    byte[] bytes = (byte[]) normsCache.get(field);
+    if (bytes == null && !hasNorms(field))
+      bytes = fakeNorms();
+    if (bytes != null) // cache hit
       System.arraycopy(bytes, 0, result, offset, maxDoc());
 
-    for (int i = 0; i < subReaders.length; i++)      // read from segments
+    for (int i = 0; i < subReaders.length; i++)
+      // read from segments
       subReaders[i].norms(field, result, offset + starts[i]);
   }
 
-  protected void doSetNorm(int n, String field, byte value)
-    throws CorruptIndexException, IOException {
-    normsCache.remove(field);                         // clear cache
-    int i = readerIndex(n);                           // find segment num
-    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
+  protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException,
+      IOException {
+    normsCache.remove(field); // clear cache
+    int i = readerIndex(n); // find segment num
+    subReaders[i].setNorm(n - starts[i], field, value); // dispatch
   }
 
   public TermEnum terms() throws IOException {
@@ -368,7 +424,7 @@
 
   public int docFreq(Term t) throws IOException {
     ensureOpen();
-    int total = 0;          // sum freqs in segments
+    int total = 0; // sum freqs in segments
     for (int i = 0; i < subReaders.length; i++)
       total += subReaders[i].docFreq(t);
     return total;
@@ -406,17 +462,17 @@
   protected synchronized void doClose() throws IOException {
     for (int i = 0; i < subReaders.length; i++)
       subReaders[i].decRef();
-    
+
     // maybe close directory
     super.doClose();
   }
 
-  public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
+  public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
     ensureOpen();
     return getFieldNames(fieldNames, this.subReaders);
   }
-  
-  static Collection getFieldNames (IndexReader.FieldOption fieldNames, IndexReader[] subReaders) {
+
+  static Collection getFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders) {
     // maintain a unique set of field names
     Set fieldSet = new HashSet();
     for (int i = 0; i < subReaders.length; i++) {
@@ -425,8 +481,8 @@
       fieldSet.addAll(names);
     }
     return fieldSet;
-  } 
-  
+  }
+
   // for testing
   SegmentReader[] getSubReaders() {
     return subReaders;
@@ -446,64 +502,63 @@
 
   static class MultiTermEnum extends TermEnum {
     private SegmentMergeQueue queue;
-  
+
     private Term term;
     private int docFreq;
-  
-    public MultiTermEnum(IndexReader[] readers, int[] starts, Term t)
-      throws IOException {
+
+    public MultiTermEnum(IndexReader[] readers, int[] starts, Term t) throws IOException {
       queue = new SegmentMergeQueue(readers.length);
       for (int i = 0; i < readers.length; i++) {
         IndexReader reader = readers[i];
         TermEnum termEnum;
-  
+
         if (t != null) {
           termEnum = reader.terms(t);
         } else
           termEnum = reader.terms();
-  
+
         SegmentMergeInfo smi = new SegmentMergeInfo(starts[i], termEnum, reader);
         if (t == null ? smi.next() : termEnum.term() != null)
-          queue.put(smi);          // initialize queue
+          queue.put(smi); // initialize queue
         else
           smi.close();
       }
-  
+
       if (t != null && queue.size() > 0) {
         next();
       }
     }
-  
+
     public boolean next() throws IOException {
-      SegmentMergeInfo top = (SegmentMergeInfo)queue.top();
+      SegmentMergeInfo top = (SegmentMergeInfo) queue.top();
       if (top == null) {
         term = null;
         return false;
       }
-  
+
       term = top.term;
       docFreq = 0;
-  
+
       while (top != null && term.compareTo(top.term) == 0) {
         queue.pop();
-        docFreq += top.termEnum.docFreq();    // increment freq
+        docFreq += top.termEnum.docFreq(); // increment freq
         if (top.next())
-          queue.put(top);          // restore queue
+          queue.put(top); // restore queue
         else
-          top.close();          // done with a segment
-        top = (SegmentMergeInfo)queue.top();
+          top.close(); // done with a segment
+        top = (SegmentMergeInfo) queue.top();
       }
       return true;
     }
-  
+
     public Term term() {
       return term;
     }
-  
+
     public int docFreq() {
       return docFreq;
     }
-  
+
     public void close() throws IOException {
       queue.close();
     }
@@ -513,44 +568,44 @@
     protected IndexReader[] readers;
     protected int[] starts;
     protected Term term;
-  
+
     protected int base = 0;
     protected int pointer = 0;
-  
+
     private TermDocs[] readerTermDocs;
-    protected TermDocs current;              // == readerTermDocs[pointer]
-  
+    protected TermDocs current; // == readerTermDocs[pointer]
+
     public MultiTermDocs(IndexReader[] r, int[] s) {
       readers = r;
       starts = s;
-  
+
       readerTermDocs = new TermDocs[r.length];
     }
-  
+
     public int doc() {
       return base + current.doc();
     }
+
     public int freq() {
       return current.freq();
     }
-  
+
     public void seek(Term term) {
       this.term = term;
       this.base = 0;
       this.pointer = 0;
       this.current = null;
     }
-  
+
     public void seek(TermEnum termEnum) throws IOException {
       seek(termEnum.term());
     }
-  
+
     public boolean next() throws IOException {
-      for(;;) {
-        if (current!=null && current.next()) {
+      for (;;) {
+        if (current != null && current.next()) {
           return true;
-        }
-        else if (pointer < readers.length) {
+        } else if (pointer < readers.length) {
           base = starts[pointer];
           current = termDocs(pointer++);
         } else {
@@ -558,12 +613,12 @@
         }
       }
     }
-  
+
     /** Optimized implementation. */
     public int read(final int[] docs, final int[] freqs) throws IOException {
       while (true) {
         while (current == null) {
-          if (pointer < readers.length) {      // try next segment
+          if (pointer < readers.length) { // try next segment
             base = starts[pointer];
             current = termDocs(pointer++);
           } else {
@@ -571,21 +626,21 @@
           }
         }
         int end = current.read(docs, freqs);
-        if (end == 0) {          // none left in segment
+        if (end == 0) { // none left in segment
           current = null;
-        } else {            // got some
-          final int b = base;        // adjust doc numbers
+        } else { // got some
+          final int b = base; // adjust doc numbers
           for (int i = 0; i < end; i++)
-           docs[i] += b;
+            docs[i] += b;
           return end;
         }
       }
     }
-  
-   /* A Possible future optimization could skip entire segments */ 
+
+    /* A possible future optimization could skip entire segments */
     public boolean skipTo(int target) throws IOException {
-      for(;;) {
-        if (current != null && current.skipTo(target-base)) {
+      for (;;) {
+        if (current != null && current.skipTo(target - base)) {
           return true;
         } else if (pointer < readers.length) {
           base = starts[pointer];
@@ -594,7 +649,7 @@
           return false;
       }
     }
-  
+
     private TermDocs termDocs(int i) throws IOException {
       if (term == null)
         return null;
@@ -604,12 +659,11 @@
       result.seek(term);
       return result;
     }
-  
-    protected TermDocs termDocs(IndexReader reader)
-      throws IOException {
+
+    protected TermDocs termDocs(IndexReader reader) throws IOException {
       return reader.termDocs();
     }
-  
+
     public void close() throws IOException {
       for (int i = 0; i < readerTermDocs.length; i++) {
         if (readerTermDocs[i] != null)
@@ -620,26 +674,25 @@
 
   static class MultiTermPositions extends MultiTermDocs implements TermPositions {
     public MultiTermPositions(IndexReader[] r, int[] s) {
-      super(r,s);
+      super(r, s);
     }
-  
+
     protected TermDocs termDocs(IndexReader reader) throws IOException {
-      return (TermDocs)reader.termPositions();
+      return (TermDocs) reader.termPositions();
     }
-  
+
     public int nextPosition() throws IOException {
-      return ((TermPositions)current).nextPosition();
+      return ((TermPositions) current).nextPosition();
     }
-    
+
     public int getPayloadLength() {
-      return ((TermPositions)current).getPayloadLength();
+      return ((TermPositions) current).getPayloadLength();
     }
-     
+
     public byte[] getPayload(byte[] data, int offset) throws IOException {
-      return ((TermPositions)current).getPayload(data, offset);
+      return ((TermPositions) current).getPayload(data, offset);
     }
-  
-  
+
     // TODO: Remove warning after API has been finalized
     public boolean isPayloadAvailable() {
       return ((TermPositions) current).isPayloadAvailable();
Index: src/java/org/apache/lucene/search/ExtendedFieldCache.java
===================================================================
--- src/java/org/apache/lucene/search/ExtendedFieldCache.java	(revision 642339)
+++ src/java/org/apache/lucene/search/ExtendedFieldCache.java	(working copy)
@@ -1,29 +1,23 @@
 package org.apache.lucene.search;
 
-import org.apache.lucene.index.IndexReader;
-
 import java.io.IOException;
 
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.cache.DoubleArrayCacheKey;
+import org.apache.lucene.index.cache.LongArrayCacheKey;
 
+
 /**
  *
- *
+ * @deprecated use IndexReader.getCachedData
  **/
 public interface ExtendedFieldCache extends FieldCache {
-  public interface LongParser {
-    /**
-     * Return an long representation of this field's value.
-     */
-    public long parseLong(String string);
-  }
+  public interface LongParser extends LongArrayCacheKey.LongParser { }
+  public interface DoubleParser extends DoubleArrayCacheKey.DoubleParser { }
 
-  public interface DoubleParser {
-    /**
-     * Return an long representation of this field's value.
-     */
-    public double parseDouble(String string);
-  }
-
+  /**
+   * @deprecated use IndexReader.getCachedData
+   */
   public static ExtendedFieldCache EXT_DEFAULT = new ExtendedFieldCacheImpl();
 
   /**
@@ -36,6 +30,7 @@
    * @param field  Which field contains the longs.
    * @return The values in the given field for each document.
    * @throws java.io.IOException If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new LongArrayCacheKey(field))
    */
   public long[] getLongs(IndexReader reader, String field)
           throws IOException;
@@ -51,6 +46,7 @@
    * @param parser Computes integer for string values.
    * @return The values in the given field for each document.
    * @throws IOException If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new LongArrayCacheKey(field, parser))
    */
   public long[] getLongs(IndexReader reader, String field, LongParser parser)
           throws IOException;
@@ -66,6 +62,7 @@
    * @param field  Which field contains the doubles.
    * @return The values in the given field for each document.
    * @throws IOException If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new DoubleArrayCacheKey(field))
    */
   public double[] getDoubles(IndexReader reader, String field)
           throws IOException;
@@ -81,6 +78,7 @@
    * @param parser Computes integer for string values.
    * @return The values in the given field for each document.
    * @throws IOException If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new DoubleArrayCacheKey(field, parser))
    */
   public double[] getDoubles(IndexReader reader, String field, DoubleParser parser)
           throws IOException;
Index: src/java/org/apache/lucene/search/ExtendedFieldCacheImpl.java
===================================================================
--- src/java/org/apache/lucene/search/ExtendedFieldCacheImpl.java	(revision 642339)
+++ src/java/org/apache/lucene/search/ExtendedFieldCacheImpl.java	(working copy)
@@ -1,28 +1,27 @@
 package org.apache.lucene.search;
 
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.cache.CacheKey;
+import org.apache.lucene.index.cache.DoubleArrayCacheKey;
+import org.apache.lucene.index.cache.LongArrayCacheKey;
 
 import java.io.IOException;
 
 
 /**
- *
- *
+ * @deprecated use IndexReader.getCachedData
  **/
 class ExtendedFieldCacheImpl extends FieldCacheImpl implements ExtendedFieldCache {
   private static final LongParser LONG_PARSER = new LongParser() {
-      public long parseLong(String value) {
-        return Long.parseLong(value);
-      }
+    public long parseLong(String value) {
+      return Long.parseLong(value);
+    }
   };
 
   private static final DoubleParser DOUBLE_PARSER = new DoubleParser() {
-      public double parseDouble(String value) {
-        return Double.parseDouble(value);
-      }
+    public double parseDouble(String value) {
+      return Double.parseDouble(value);
+    }
   };
 
 
@@ -33,37 +32,10 @@
   // inherit javadocs
   public long[] getLongs(IndexReader reader, String field, LongParser parser)
       throws IOException {
-    return (long[]) longsCache.get(reader, new Entry(field, parser));
+    return (long[]) reader.getCachedData
+     (new LongArrayCacheKey(field, parser)).getPayload();
   }
 
-  Cache longsCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      LongParser parser = (LongParser) entry.custom;
-      final long[] retArray = new long[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term(field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          long termval = parser.parseLong(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
-
   // inherit javadocs
   public double[] getDoubles(IndexReader reader, String field)
     throws IOException {
@@ -73,93 +45,14 @@
   // inherit javadocs
   public double[] getDoubles(IndexReader reader, String field, DoubleParser parser)
       throws IOException {
-    return (double[]) doublesCache.get(reader, new Entry(field, parser));
+    return (double[]) reader.getCachedData
+    (new DoubleArrayCacheKey(field, parser)).getPayload();
   }
 
-  Cache doublesCache = new Cache() {
 
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      DoubleParser parser = (DoubleParser) entry.custom;
-      final double[] retArray = new double[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          double termval = parser.parseDouble(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
-
-
   // inherit javadocs
   public Object getAuto(IndexReader reader, String field) throws IOException {
-    return autoCache.get(reader, field);
+    return CacheKey.getAutoCacheKey(reader, field);
   }
 
-  Cache autoCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object fieldKey)
-        throws IOException {
-      String field = ((String)fieldKey).intern();
-      TermEnum enumerator = reader.terms (new Term (field, ""));
-      try {
-        Term term = enumerator.term();
-        if (term == null) {
-          throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
-        }
-        Object ret = null;
-        if (term.field() == field) {
-          String termtext = term.text().trim();
-
-          /**
-           * Java 1.4 level code:
-
-           if (pIntegers.matcher(termtext).matches())
-           return IntegerSortedHitQueue.comparator (reader, enumerator, field);
-
-           else if (pFloats.matcher(termtext).matches())
-           return FloatSortedHitQueue.comparator (reader, enumerator, field);
-           */
-
-          // Java 1.3 level code:
-          try {
-            Integer.parseInt (termtext);
-            ret = getInts (reader, field);
-          } catch (NumberFormatException nfe1) {
-            try {
-              Long.parseLong(termtext);
-              ret = getLongs (reader, field);
-            } catch (NumberFormatException nfe2) {
-              try {
-                Float.parseFloat (termtext);
-                ret = getFloats (reader, field);
-              } catch (NumberFormatException nfe3) {
-                ret = getStringIndex (reader, field);
-              }
-            }
-          }
-        } else {
-          throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
-        }
-        return ret;
-      } finally {
-        enumerator.close();
-      }
-    }
-  };
-
 }
Index: src/java/org/apache/lucene/search/FieldCache.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCache.java	(revision 642339)
+++ src/java/org/apache/lucene/search/FieldCache.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.cache.*;
 import java.io.IOException;
 
 /**
@@ -28,6 +29,7 @@
  * @author  Tim Jones (Nacimiento Software)
  * @since   lucene 1.4
  * @version $Id$
+ * @deprecated use IndexReader.getCachedData
  */
 public interface FieldCache {
 
@@ -37,53 +39,38 @@
   public static final int STRING_INDEX = -1;
 
 
-  /** Expert: Stores term text values and document ordering data. */
-  public static class StringIndex {
-
-    /** All the term values, in natural order. */
-    public final String[] lookup;
-
-    /** For each document, an index into the lookup array. */
-    public final int[] order;
-
-    /** Creates one of these objects */
+   /** Expert: Stores term text values and document ordering data. 
+     * @deprecated use StringIndexCacheKey.StringIndex
+     */
+  public static class StringIndex extends StringIndexCacheKey.StringIndex { 
     public StringIndex (int[] values, String[] lookup) {
-      this.order = values;
-      this.lookup = lookup;
-    }
+      super(values,lookup);
+     }
   }
 
   /** Interface to parse bytes from document fields.
    * @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser)
+   * @deprecated use ByteArrayCacheKey.ByteParser
    */
-  public interface ByteParser {
-    /** Return a single Byte representation of this field's value. */
-    public byte parseByte(String string);
-  }
+  public interface ByteParser extends ByteArrayCacheKey.ByteParser { }
 
   /** Interface to parse shorts from document fields.
    * @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser)
+   * @deprecated use ShortArrayCacheKey.ShortParser
    */
-  public interface ShortParser {
-    /** Return a short representation of this field's value. */
-    public short parseShort(String string);
-  }
+  public interface ShortParser extends ShortArrayCacheKey.ShortParser { }
 
   /** Interface to parse ints from document fields.
    * @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser)
+   * @deprecated use IntArrayCacheKey.IntParser
    */
-  public interface IntParser {
-    /** Return an integer representation of this field's value. */
-    public int parseInt(String string);
-  }
+  public interface IntParser extends IntArrayCacheKey.IntParser { }
 
   /** Interface to parse floats from document fields.
    * @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser)
+   * @deprecated use FloatArrayCacheKey.FloatParser
    */
-  public interface FloatParser {
-    /** Return an float representation of this field's value. */
-    public float parseFloat(String string);
-  }
+  public interface FloatParser extends FloatArrayCacheKey.FloatParser { }
 
   /** Expert: The cache used internally by sorting and range query classes. */
   public static FieldCache DEFAULT = new FieldCacheImpl();
@@ -96,6 +83,7 @@
    * @param field   Which field contains the single byte values.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new ByteArrayCacheKey(field))
    */
   public byte[] getBytes (IndexReader reader, String field)
   throws IOException;
@@ -109,6 +97,7 @@
    * @param parser  Computes byte for string values.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new ByteArrayCacheKey(field, parser))
    */
   public byte[] getBytes (IndexReader reader, String field, ByteParser parser)
   throws IOException;
@@ -121,6 +110,7 @@
    * @param field   Which field contains the shorts.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new ShortArrayCacheKey(field))
    */
   public short[] getShorts (IndexReader reader, String field)
   throws IOException;
@@ -134,6 +124,7 @@
    * @param parser  Computes short for string values.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new ShortArrayCacheKey(field, parser))
    */
   public short[] getShorts (IndexReader reader, String field, ShortParser parser)
   throws IOException;
@@ -146,6 +137,7 @@
    * @param field   Which field contains the integers.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new IntArrayCacheKey(field))
    */
   public int[] getInts (IndexReader reader, String field)
   throws IOException;
@@ -159,6 +151,7 @@
    * @param parser  Computes integer for string values.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new IntArrayCacheKey(field, parser))
    */
   public int[] getInts (IndexReader reader, String field, IntParser parser)
   throws IOException;
@@ -171,6 +164,7 @@
    * @param field   Which field contains the floats.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new FloatArrayCacheKey(field))
    */
   public float[] getFloats (IndexReader reader, String field)
   throws IOException;
@@ -184,6 +178,7 @@
    * @param parser  Computes float for string values.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new FloatArrayCacheKey(field, parser))
    */
   public float[] getFloats (IndexReader reader, String field,
                             FloatParser parser) throws IOException;
@@ -196,6 +191,7 @@
    * @param field   Which field contains the strings.
    * @return The values in the given field for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new StringArrayCacheKey(field))
    */
   public String[] getStrings (IndexReader reader, String field)
   throws IOException;
@@ -208,6 +204,7 @@
    * @param field   Which field contains the strings.
    * @return Array of terms and index into the array for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData(reader, new StringIndexCacheKey(field))
    */
   public StringIndex getStringIndex (IndexReader reader, String field)
   throws IOException;
@@ -222,6 +219,7 @@
    * @param field   Which field contains the values.
    * @return int[], float[] or StringIndex.
    * @throws IOException  If any error occurs.
+   * @deprecated use CacheKey.getAuto(reader, field)
    */
   public Object getAuto (IndexReader reader, String field)
   throws IOException;
@@ -236,6 +234,7 @@
    * @param comparator Used to convert terms into something to sort by.
    * @return Array of sort objects, one for each document.
    * @throws IOException  If any error occurs.
+   * @deprecated use IndexReader.getCachedData with a custom CacheKey
    */
   public Comparable[] getCustom (IndexReader reader, String field, SortComparator comparator)
   throws IOException;
Index: src/java/org/apache/lucene/search/FieldCacheImpl.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheImpl.java	(revision 642339)
+++ src/java/org/apache/lucene/search/FieldCacheImpl.java	(working copy)
@@ -17,17 +17,24 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
 import java.util.WeakHashMap;
 
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.cache.ByteArrayCacheKey;
+import org.apache.lucene.index.cache.CacheKey;
+import org.apache.lucene.index.cache.FloatArrayCacheKey;
+import org.apache.lucene.index.cache.IntArrayCacheKey;
+import org.apache.lucene.index.cache.LongArrayCacheKey;
+import org.apache.lucene.index.cache.ShortArrayCacheKey;
+import org.apache.lucene.index.cache.StringArrayCacheKey;
+import org.apache.lucene.index.cache.StringIndexCacheKey;
+
 /**
  * Expert: The default cache implementation, storing all values in memory.
  * A WeakHashMap is used for storage.
@@ -37,6 +44,7 @@
  * @author  Tim Jones (Nacimiento Software)
  * @since   lucene 1.4
  * @version $Id$
+ * @deprecated use IndexReader.getCachedData
  */
 class FieldCacheImpl
 implements FieldCache {
@@ -81,11 +89,14 @@
     }
   }
 
+  /** @deprecated */
   static final class CreationPlaceholder {
     Object value;
   }
 
-  /** Expert: Every composite-key in the internal cache is of this type. */
+  /** Expert: Every composite-key in the internal cache is of this type. 
+   * @deprecated 
+   */
   static class Entry {
     final String field;        // which Fieldable
     final int type;            // which SortField type
@@ -155,6 +166,7 @@
         return Float.parseFloat(value);
       }
   };
+  
 
   // inherit javadocs
   public byte[] getBytes (IndexReader reader, String field) throws IOException {
@@ -164,348 +176,119 @@
   // inherit javadocs
   public byte[] getBytes(IndexReader reader, String field, ByteParser parser)
       throws IOException {
-    return (byte[]) bytesCache.get(reader, new Entry(field, parser));
+    return (byte[]) reader.getCachedData
+    (new ByteArrayCacheKey(field, parser)).getPayload();
   }
-
-  Cache bytesCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      ByteParser parser = (ByteParser) entry.custom;
-      final byte[] retArray = new byte[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          byte termval = parser.parseByte(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
   
   // inherit javadocs
   public short[] getShorts (IndexReader reader, String field) throws IOException {
-    return getShorts(reader, field, SHORT_PARSER);
+    return getShorts(reader, field, null);
   }
 
   // inherit javadocs
   public short[] getShorts(IndexReader reader, String field, ShortParser parser)
       throws IOException {
-    return (short[]) shortsCache.get(reader, new Entry(field, parser));
+    return (short[]) reader.getCachedData
+    (new ShortArrayCacheKey(field, parser)).getPayload();
   }
 
-  Cache shortsCache = new Cache() {
 
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      ShortParser parser = (ShortParser) entry.custom;
-      final short[] retArray = new short[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          short termval = parser.parseShort(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
   
   // inherit javadocs
   public int[] getInts (IndexReader reader, String field) throws IOException {
-    return getInts(reader, field, INT_PARSER);
+    return getInts(reader, field, null);
   }
 
   // inherit javadocs
   public int[] getInts(IndexReader reader, String field, IntParser parser)
       throws IOException {
-    return (int[]) intsCache.get(reader, new Entry(field, parser));
+        return (int[]) reader.getCachedData
+         (new IntArrayCacheKey(field, parser)).getPayload();
   }
 
-  Cache intsCache = new Cache() {
 
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      IntParser parser = (IntParser) entry.custom;
-      final int[] retArray = new int[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          int termval = parser.parseInt(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
 
-
   // inherit javadocs
   public float[] getFloats (IndexReader reader, String field)
     throws IOException {
-    return getFloats(reader, field, FLOAT_PARSER);
+    return getFloats(reader, field, null);
   }
 
   // inherit javadocs
   public float[] getFloats(IndexReader reader, String field, FloatParser parser)
       throws IOException {
-    return (float[]) floatsCache.get(reader, new Entry(field, parser));
+        return (float[]) reader.getCachedData
+          (new FloatArrayCacheKey(field, parser)).getPayload();
   }
 
-  Cache floatsCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      FloatParser parser = (FloatParser) entry.custom;
-      final float[] retArray = new float[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          float termval = parser.parseFloat(term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
-
   // inherit javadocs
   public String[] getStrings(IndexReader reader, String field)
       throws IOException {
-    return (String[]) stringsCache.get(reader, field);
+        return (String[]) reader.getCachedData
+         (new StringArrayCacheKey(field)).getPayload();
   }
 
-  Cache stringsCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object fieldKey)
-        throws IOException {
-      String field = ((String) fieldKey).intern();
-      final String[] retArray = new String[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          String termval = term.text();
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
-
   // inherit javadocs
   public StringIndex getStringIndex(IndexReader reader, String field)
       throws IOException {
-    return (StringIndex) stringsIndexCache.get(reader, field);
+   StringIndexCacheKey.StringIndex data = (StringIndexCacheKey.StringIndex) 
+       reader.getCachedData(new StringIndexCacheKey(field)).getPayload();
+      return new StringIndex(data.order, data.lookup);
   }
 
-  Cache stringsIndexCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object fieldKey)
-        throws IOException {
-      String field = ((String) fieldKey).intern();
-      final int[] retArray = new int[reader.maxDoc()];
-      String[] mterms = new String[reader.maxDoc()+1];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      int t = 0;  // current term number
-
-      // an entry for documents that have no terms in this field
-      // should a document with no terms be at top or bottom?
-      // this puts them at the top - if it is changed, FieldDocSortedHitQueue
-      // needs to change as well.
-      mterms[t++] = null;
-
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-
-          // store term text
-          // we expect that there is at most one term per document
-          if (t >= mterms.length) throw new RuntimeException ("there are more terms than " +
-                  "documents in field \"" + field + "\", but it's impossible to sort on " +
-                  "tokenized fields");
-          mterms[t] = term.text();
-
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = t;
-          }
-
-          t++;
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-
-      if (t == 0) {
-        // if there are no terms, make the term array
-        // have a single null entry
-        mterms = new String[1];
-      } else if (t < mterms.length) {
-        // if there are less terms than documents,
-        // trim off the dead array space
-        String[] terms = new String[t];
-        System.arraycopy (mterms, 0, terms, 0, t);
-        mterms = terms;
-      }
-
-      StringIndex value = new StringIndex (retArray, mterms);
-      return value;
-    }
-  };
-
-  /** The pattern used to detect integer values in a field */
-  /** removed for java 1.3 compatibility
-   protected static final Pattern pIntegers = Pattern.compile ("[0-9\\-]+");
-   **/
-
-  /** The pattern used to detect float values in a field */
-  /**
-   * removed for java 1.3 compatibility
-   * protected static final Object pFloats = Pattern.compile ("[0-9+\\-\\.eEfFdD]+");
-   */
-
 	// inherit javadocs
   public Object getAuto(IndexReader reader, String field) throws IOException {
-    return autoCache.get(reader, field);
-  }
 
-  Cache autoCache = new Cache() {
+    field = ((String)field).intern();
+    TermEnum enumerator = reader.terms (new Term (field, ""));
+    try {
+      Term term = enumerator.term();
+      if (term == null) {
+        throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
+      }
+      CacheKey ret = null;
+      if (term.field() == field) {
+        String termtext = term.text().trim();
 
-    protected Object createValue(IndexReader reader, Object fieldKey)
-        throws IOException {
-      String field = ((String)fieldKey).intern();
-      TermEnum enumerator = reader.terms (new Term (field, ""));
-      try {
-        Term term = enumerator.term();
-        if (term == null) {
-          throw new RuntimeException ("no terms in field " + field + " - cannot determine sort type");
-        }
-        Object ret = null;
-        if (term.field() == field) {
-          String termtext = term.text().trim();
+        /**
+         * Java 1.4 level code:
 
-          /**
-           * Java 1.4 level code:
+         if (pIntegers.matcher(termtext).matches())
+         return IntegerSortedHitQueue.comparator (reader, enumerator, field);
 
-           if (pIntegers.matcher(termtext).matches())
-           return IntegerSortedHitQueue.comparator (reader, enumerator, field);
+         else if (pFloats.matcher(termtext).matches())
+         return FloatSortedHitQueue.comparator (reader, enumerator, field);
+         */
 
-           else if (pFloats.matcher(termtext).matches())
-           return FloatSortedHitQueue.comparator (reader, enumerator, field);
-           */
-
-          // Java 1.3 level code:
+        // Java 1.3 level code:
+        try {
+          Integer.parseInt (termtext);
+          ret = new IntArrayCacheKey(field);
+        } catch (NumberFormatException nfe1) {
           try {
-            Integer.parseInt (termtext);
-            ret = getInts (reader, field);
-          } catch (NumberFormatException nfe1) {
-            try {
-                Float.parseFloat (termtext);
-                ret = getFloats (reader, field);
-              } catch (NumberFormatException nfe3) {
-                ret = getStringIndex (reader, field);
-              }
-          }          
-        } else {
-          throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
+              Float.parseFloat (termtext);
+              ret = new FloatArrayCacheKey(field);
+            } catch (NumberFormatException nfe3) {
+              ret = new StringArrayCacheKey(field);
+            }
         }
-        return ret;
-      } finally {
-        enumerator.close();
+      } else {
+        throw new RuntimeException ("field \"" + field + "\" does not appear to be indexed");
       }
+      return ret;
+    } finally {
+      enumerator.close();
     }
-  };
+  }
 
   // inherit javadocs
   public Comparable[] getCustom(IndexReader reader, String field,
       SortComparator comparator) throws IOException {
-    return (Comparable[]) customCache.get(reader, new Entry(field, comparator));
+    
+         return (Comparable[]) 
+           reader.getCachedData(new SortComparatorCacheKey
+                               (field, comparator)).getPayload();
   }
-
-  Cache customCache = new Cache() {
-
-    protected Object createValue(IndexReader reader, Object entryKey)
-        throws IOException {
-      Entry entry = (Entry) entryKey;
-      String field = entry.field;
-      SortComparator comparator = (SortComparator) entry.custom;
-      final Comparable[] retArray = new Comparable[reader.maxDoc()];
-      TermDocs termDocs = reader.termDocs();
-      TermEnum termEnum = reader.terms (new Term (field, ""));
-      try {
-        do {
-          Term term = termEnum.term();
-          if (term==null || term.field() != field) break;
-          Comparable termval = comparator.getComparable (term.text());
-          termDocs.seek (termEnum);
-          while (termDocs.next()) {
-            retArray[termDocs.doc()] = termval;
-          }
-        } while (termEnum.next());
-      } finally {
-        termDocs.close();
-        termEnum.close();
-      }
-      return retArray;
-    }
-  };
   
 }
 
Index: src/java/org/apache/lucene/search/FieldDocSortedHitQueue.java
===================================================================
--- src/java/org/apache/lucene/search/FieldDocSortedHitQueue.java	(revision 642339)
+++ src/java/org/apache/lucene/search/FieldDocSortedHitQueue.java	(working copy)
@@ -134,6 +134,7 @@
         case SortField.STRING:{
 					String s1 = (String) docA.fields[i];
 					String s2 = (String) docB.fields[i];
+					//TODO: update javadoc
 					// null values need to be sorted first, because of how FieldCache.getStringIndex()
 					// works - in that routine, any documents without a value in the given field are
 					// put first.  If both are null, the next SortField is used
@@ -189,6 +190,7 @@
 					throw new RuntimeException ("invalid SortField type: "+type);
         }
       }
+
 			if (fields[i].getReverse()) {
 				c = -c;
 			}
Index: src/java/org/apache/lucene/search/FieldSortedHitQueue.java
===================================================================
--- src/java/org/apache/lucene/search/FieldSortedHitQueue.java	(revision 642339)
+++ src/java/org/apache/lucene/search/FieldSortedHitQueue.java	(working copy)
@@ -18,8 +18,18 @@
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.cache.ByteFieldCacheKey;
+import org.apache.lucene.index.cache.CacheKey;
+import org.apache.lucene.index.cache.DoubleFieldCacheKey;
+import org.apache.lucene.index.cache.FieldValues;
+import org.apache.lucene.index.cache.FloatFieldCacheKey;
+import org.apache.lucene.index.cache.IntFieldCacheKey;
+import org.apache.lucene.index.cache.LongFieldCacheKey;
+import org.apache.lucene.index.cache.ShortFieldCacheKey;
+import org.apache.lucene.index.cache.StringFieldCacheKey;
 import org.apache.lucene.util.PriorityQueue;
 
+
 import java.io.IOException;
 import java.text.Collator;
 import java.util.Locale;
@@ -158,28 +168,64 @@
     return fields;
   }
   
+  //TODO: comparators are no longer cached here, just the field data - should return caching but what to do about FieldCacheImpl.Cache?
   static ScoreDocComparator getCachedComparator (IndexReader reader, String field, int type, Locale locale, SortComparatorSource factory)
   throws IOException {
     if (type == SortField.DOC) return ScoreDocComparator.INDEXORDER;
     if (type == SortField.SCORE) return ScoreDocComparator.RELEVANCE;
-    FieldCacheImpl.Entry entry = (factory != null)
-      ? new FieldCacheImpl.Entry (field, factory)
-      : new FieldCacheImpl.Entry (field, type, locale);
-    return (ScoreDocComparator)Comparators.get(reader, entry);
+
+    ScoreDocComparator comparator;
+    switch (type) {
+      case SortField.AUTO:
+        comparator = comparatorAuto (reader, field);
+        break;
+      case SortField.INT:
+        comparator = comparatorInt (reader, field);
+        break;
+      case SortField.FLOAT:
+        comparator = comparatorFloat (reader, field);
+        break;
+      case SortField.LONG:
+        comparator = comparatorLong(reader, field);
+        break;
+      case SortField.DOUBLE:
+        comparator = comparatorDouble(reader, field);
+        break;
+      case SortField.SHORT:
+        comparator = comparatorShort(reader, field);
+        break;
+      case SortField.BYTE:
+        comparator = comparatorByte(reader, field);
+        break;
+      case SortField.STRING:
+        if (locale != null) comparator = comparatorStringLocale (reader, field, locale);
+        else comparator = comparatorString (reader, field);
+        break;
+      case SortField.CUSTOM:
+        comparator = factory.newComparator (reader, field);
+        break;
+      default:
+        throw new RuntimeException ("unknown field type: "+type);
+    }
+    return comparator;
   }
 
   /** Internal cache of comparators. Similar to FieldCache, only
-   *  caches comparators instead of term values. */
+   *  caches comparators instead of term values. 
+   *  @deprecated
+   */
   static final FieldCacheImpl.Cache Comparators = new FieldCacheImpl.Cache() {
 
     protected Object createValue(IndexReader reader, Object entryKey)
         throws IOException {
+      //System.out.println("creating new cache entry for reader:" + reader + " with " + entryKey.getClass().getCanonicalName());
       FieldCacheImpl.Entry entry = (FieldCacheImpl.Entry) entryKey;
       String fieldname = entry.field;
       int type = entry.type;
       Locale locale = entry.locale;
       SortComparatorSource factory = (SortComparatorSource) entry.custom;
       ScoreDocComparator comparator;
+
       switch (type) {
         case SortField.AUTO:
           comparator = comparatorAuto (reader, fieldname);
@@ -219,26 +265,26 @@
    /**
    * Returns a comparator for sorting hits according to a field containing bytes.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg integer values.
+   * @param fieldname  Fieldable containing integer values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorByte(final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final byte[] fieldOrder = FieldCache.DEFAULT.getBytes(reader, field);
+    final FieldValues fieldOrder = (FieldValues) reader.getCachedData(new ByteFieldCacheKey(field)).getPayload();;
     return new ScoreDocComparator() {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final int fi = fieldOrder[i.doc];
-        final int fj = fieldOrder[j.doc];
+        final int fi = ((Byte)fieldOrder.getOrd(i.doc)).byteValue();
+        final int fj = ((Byte)fieldOrder.getOrd(j.doc)).byteValue();
         if (fi < fj) return -1;
         if (fi > fj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Byte(fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -250,26 +296,26 @@
   /**
    * Returns a comparator for sorting hits according to a field containing shorts.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg integer values.
+   * @param fieldname  Fieldable containing integer values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorShort(final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final short[] fieldOrder = FieldCache.DEFAULT.getShorts(reader, field);
+    final FieldValues fieldOrder = (FieldValues) reader.getCachedData(new ShortFieldCacheKey(field)).getPayload();
     return new ScoreDocComparator() {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final int fi = fieldOrder[i.doc];
-        final int fj = fieldOrder[j.doc];
+        final int fi = ((Short)fieldOrder.getOrd(i.doc)).shortValue();
+        final int fj = ((Short)fieldOrder.getOrd(j.doc)).shortValue();
         if (fi < fj) return -1;
         if (fi > fj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Short(fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -281,26 +327,26 @@
   /**
    * Returns a comparator for sorting hits according to a field containing integers.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg integer values.
+   * @param fieldname  Fieldable containing integer values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorInt (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final int[] fieldOrder = FieldCache.DEFAULT.getInts (reader, field);
+    final FieldValues fieldOrder = (FieldValues) reader.getCachedData(new IntFieldCacheKey(field)).getPayload();
     return new ScoreDocComparator() {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final int fi = fieldOrder[i.doc];
-        final int fj = fieldOrder[j.doc];
+        final int fi = ((Integer)fieldOrder.getOrd(i.doc)).intValue();
+        final int fj = ((Integer)fieldOrder.getOrd(j.doc)).intValue();
         if (fi < fj) return -1;
         if (fi > fj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Integer (fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -312,26 +358,26 @@
   /**
    * Returns a comparator for sorting hits according to a field containing integers.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg integer values.
+   * @param fieldname  Fieldable containing integer values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorLong (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final long[] fieldOrder = ExtendedFieldCache.EXT_DEFAULT.getLongs (reader, field);
+    final FieldValues fieldOrder = (FieldValues) reader.getCachedData(new LongFieldCacheKey(field)).getPayload();
     return new ScoreDocComparator() {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final long li = fieldOrder[i.doc];
-        final long lj = fieldOrder[j.doc];
+        final long li = ((Long)fieldOrder.getOrd(i.doc)).longValue();
+        final long lj = ((Long)fieldOrder.getOrd(j.doc)).longValue();
         if (li < lj) return -1;
         if (li > lj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Long(fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -344,26 +390,27 @@
   /**
    * Returns a comparator for sorting hits according to a field containing floats.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg float values.
+   * @param fieldname  Fieldable containing float values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorFloat (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final float[] fieldOrder = FieldCache.DEFAULT.getFloats (reader, field);
+    final FieldValues fieldOrder = (FieldValues)  reader.getCachedData(new FloatFieldCacheKey(field)).getPayload();
+
     return new ScoreDocComparator () {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final float fi = fieldOrder[i.doc];
-        final float fj = fieldOrder[j.doc];
+        final float fi = ((Float)fieldOrder.getOrd(i.doc)).floatValue();
+        final float fj = ((Float)fieldOrder.getOrd(j.doc)).floatValue();
         if (fi < fj) return -1;
         if (fi > fj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Float (fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -375,26 +422,26 @@
   /**
    * Returns a comparator for sorting hits according to a field containing doubles.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg float values.
+   * @param fieldname  Fieldable containing float values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorDouble(final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final double[] fieldOrder = ExtendedFieldCache.EXT_DEFAULT.getDoubles (reader, field);
+    final FieldValues fieldOrder = (FieldValues) reader.getCachedData(new DoubleFieldCacheKey(field)).getPayload();
     return new ScoreDocComparator () {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final double di = fieldOrder[i.doc];
-        final double dj = fieldOrder[j.doc];
+        final double di = ((Double)fieldOrder.getOrd(i.doc)).doubleValue();
+        final double dj = ((Double)fieldOrder.getOrd(j.doc)).doubleValue();
         if (di < dj) return -1;
         if (di > dj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return new Double (fieldOrder[i.doc]);
+        return fieldOrder.getOrd(i.doc);
       }
 
       public int sortType() {
@@ -406,26 +453,28 @@
   /**
    * Returns a comparator for sorting hits according to a field containing strings.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg string values.
+   * @param fieldname  Fieldable containing string values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorString (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final FieldCache.StringIndex index = FieldCache.DEFAULT.getStringIndex (reader, field);
+    final FieldValues data = (FieldValues) 
+    reader.getCachedData(new StringFieldCacheKey(field)).getPayload();
+ 
     return new ScoreDocComparator () {
 
       public final int compare (final ScoreDoc i, final ScoreDoc j) {
-        final int fi = index.order[i.doc];
-        final int fj = index.order[j.doc];
+        final int fi = ((Integer)data.getOrd(i.doc)).intValue();
+        final int fj = ((Integer)data.getOrd(j.doc)).intValue();
         if (fi < fj) return -1;
         if (fi > fj) return 1;
         return 0;
       }
 
       public Comparable sortValue (final ScoreDoc i) {
-        return index.lookup[index.order[i.doc]];
+        return (String)data.getValue(i.doc);
       }
 
       public int sortType() {
@@ -437,7 +486,7 @@
   /**
    * Returns a comparator for sorting hits according to a field containing strings.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg string values.
+   * @param fieldname  Fieldable containing string values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
@@ -445,25 +494,26 @@
   throws IOException {
     final Collator collator = Collator.getInstance (locale);
     final String field = fieldname.intern();
-    final String[] index = FieldCache.DEFAULT.getStrings (reader, field);
+
+    final FieldValues data = (FieldValues)  reader.getCachedData(new StringFieldCacheKey(field)).getPayload();
     return new ScoreDocComparator() {
 
     	public final int compare(final ScoreDoc i, final ScoreDoc j) {
-			String is = index[i.doc];
-			String js = index[j.doc];
-			if (is == js) {
-				return 0;
-			} else if (is == null) {
-				return -1;
-			} else if (js == null) {
-				return 1;
-			} else {
-				return collator.compare(is, js);
-			}
-		}
+    	  String is = ((String)data.getValue(i.doc));
+    	  String js = ((String)data.getValue(j.doc));
+    	  if (is == js) {
+    	    return 0;
+    	  } else if (is == null) {
+    	    return -1;
+    	  } else if (js == null) {
+    	    return 1;
+    	  } else {
+    	    return collator.compare(is, js);
+    	  }
+    	}
 
       public Comparable sortValue (final ScoreDoc i) {
-        return index[i.doc];
+        return data.getValue(i.doc);
       }
 
       public int sortType() {
@@ -478,24 +528,23 @@
    * floats or strings.  Once the type is determined, one of the other static methods
    * in this class is called to get the comparator.
    * @param reader  Index to use.
-   * @param fieldname  Fieldable containg values.
+   * @param fieldname  Fieldable containing values.
    * @return  Comparator for sorting hits.
    * @throws IOException If an error occurs reading the index.
    */
   static ScoreDocComparator comparatorAuto (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    Object lookupArray = ExtendedFieldCache.EXT_DEFAULT.getAuto (reader, field);
-    if (lookupArray instanceof FieldCache.StringIndex) {
+    Object cacheKey = CacheKey.getAutoCacheKey(reader, field);
+    //System.out.println(cacheKey.getClass().getCanonicalName());
+    if (cacheKey instanceof StringFieldCacheKey) {
      return comparatorString (reader, field);
-    } else if (lookupArray instanceof int[]) {
+    } else if (cacheKey instanceof IntFieldCacheKey) {
      return comparatorInt (reader, field);
-    } else if (lookupArray instanceof long[]) {
+    } else if (cacheKey instanceof LongFieldCacheKey) {
      return comparatorLong (reader, field);
-    } else if (lookupArray instanceof float[]) {
+    } else if (cacheKey instanceof FloatFieldCacheKey) {
       return comparatorFloat (reader, field);
-    } else if (lookupArray instanceof String[]) {
-      return comparatorString (reader, field);
     } else {
       throw new RuntimeException ("unknown data type in field '"+field+"'");
     }
Index: src/java/org/apache/lucene/search/function/ByteFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ByteFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/ByteFieldSource.java	(working copy)
@@ -36,6 +36,7 @@
  * 
  * @see org.apache.lucene.search.function.FieldCacheSource for requirements 
  * on the field. 
+ * TODO: deprecated methods/classes used
  */
 public class ByteFieldSource extends FieldCacheSource {
   private FieldCache.ByteParser parser;
Index: src/java/org/apache/lucene/search/function/FloatFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/FloatFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/FloatFieldSource.java	(working copy)
@@ -38,6 +38,8 @@
  * on the field.
  *  
  * @author yonik
+ * 
+ * TODO: deprecated methods/classes used
  */
 public class FloatFieldSource extends FieldCacheSource {
   private FieldCache.FloatParser parser;
Index: src/java/org/apache/lucene/search/function/IntFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/IntFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/IntFieldSource.java	(working copy)
@@ -37,7 +37,7 @@
  * @see org.apache.lucene.search.function.FieldCacheSource for requirements 
  * on the field.
  *
- *
+ *  TODO: deprecated methods/classes used
  */
 public class IntFieldSource extends FieldCacheSource {
   private FieldCache.IntParser parser;
Index: src/java/org/apache/lucene/search/function/OrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/OrdFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/OrdFieldSource.java	(working copy)
@@ -45,6 +45,8 @@
  * supported anymore in such a case.</font>
  * 
  * @author yonik
+ * 
+ * TODO: deprecated methods/classes used
  */
 
 public class OrdFieldSource extends ValueSource {
Index: src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java	(working copy)
@@ -46,6 +46,7 @@
  * supported anymore in such a case.</font>
  * 
  * @author yonik
+ * TODO: deprecated methods/classes used
  */
 
 public class ReverseOrdFieldSource extends ValueSource {
Index: src/java/org/apache/lucene/search/function/ShortFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ShortFieldSource.java	(revision 642339)
+++ src/java/org/apache/lucene/search/function/ShortFieldSource.java	(working copy)
@@ -36,6 +36,7 @@
  * 
  * @see org.apache.lucene.search.function.FieldCacheSource for requirements 
  * on the field.
+ * TODO: deprecated methods/classes used
  */
 public class ShortFieldSource extends FieldCacheSource {
   private FieldCache.ShortParser parser;
Index: src/java/org/apache/lucene/search/SortComparator.java
===================================================================
--- src/java/org/apache/lucene/search/SortComparator.java	(revision 642339)
+++ src/java/org/apache/lucene/search/SortComparator.java	(working copy)
@@ -46,7 +46,9 @@
   public ScoreDocComparator newComparator (final IndexReader reader, final String fieldname)
   throws IOException {
     final String field = fieldname.intern();
-    final Comparable[] cachedValues = FieldCache.DEFAULT.getCustom (reader, field, SortComparator.this);
+    final Comparable[] cachedValues =  (Comparable[]) 
+    reader.getCachedData(new SortComparatorCacheKey
+        (field, SortComparator.this)).getPayload();
     
     return new ScoreDocComparator() {
 
Index: src/java/org/apache/lucene/search/SortComparatorCacheKey.java
===================================================================
--- src/java/org/apache/lucene/search/SortComparatorCacheKey.java	(revision 0)
+++ src/java/org/apache/lucene/search/SortComparatorCacheKey.java	(revision 0)
@@ -0,0 +1,91 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.cache.*;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+
+import org.apache.lucene.search.SortComparator;
+
+import java.io.IOException;
+
+/* :TODO: javadocs 
+ * :TODO: setup with FieldValues for faster reopen
+ * :TODO: this class must live in o.a.l.search because of access level of comparator.getComparable
+ */
+public class SortComparatorCacheKey extends CacheKey {
+
+  String field;
+  SortComparator comparator;
+    
+  public SortComparatorCacheKey(String f, SortComparator c) {
+    field = f.intern();
+    comparator = c;
+  }
+  
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + ((comparator == null) ? 0 : comparator.hashCode());
+    result = prime * result + ((field == null) ? 0 : field.hashCode());
+    return result;
+  }
+
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null || getClass() != obj.getClass())
+      return false;
+    final SortComparatorCacheKey other = (SortComparatorCacheKey) obj;
+    if (comparator == null) {
+      if (other.comparator != null)
+        return false;
+    } else if (!comparator.equals(other.comparator))
+      return false;
+    if (field == null) {
+      if (other.field != null)
+        return false;
+    } else if (!field.equals(other.field))
+      return false;
+    return true;
+  }
+
+  public CacheData buildData(IndexReader reader) throws IOException {
+      
+    final Comparable[] retArray = new Comparable[reader.maxDoc()];
+    TermDocs termDocs = reader.termDocs();
+    TermEnum termEnum = reader.terms (new Term (field, ""));
+    try {
+      do {
+        Term term = termEnum.term();
+        if (term==null || term.field() != field) break;
+        Comparable termval = comparator.getComparable (term.text());
+        termDocs.seek (termEnum);
+        while (termDocs.next()) {
+          retArray[termDocs.doc()] = termval;
+        }
+      } while (termEnum.next());
+    } finally {
+      termDocs.close();
+      termEnum.close();
+    }
+    return new CacheData(retArray);
+  }
+}
