Index: solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
===================================================================
--- solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java	(revision 1081496)
+++ solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java	(working copy)
@@ -38,7 +38,6 @@
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
 import org.apache.solr.schema.FieldType;
-import org.apache.solr.util.ByteUtils;
 
 import org.apache.noggit.CharArr;
 
@@ -141,12 +140,12 @@
     final Set<BytesRef> tokens = new HashSet<BytesRef>();
     final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query));
     final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
+    final BytesRef bytes = bytesAtt.getBytesRef();
     try {
       tokenStream.reset();
       while (tokenStream.incrementToken()) {
-        final BytesRef bytes = new BytesRef();
-        bytesAtt.toBytesRef(bytes);
-        tokens.add(bytes);
+        bytesAtt.fillBytesRef();
+        tokens.add(new BytesRef(bytes));
       }
     } catch (IOException ioe) {
       throw new RuntimeException("Error occured while iterating over tokenstream", ioe);
@@ -236,12 +235,13 @@
     FieldType fieldType = context.getFieldType();
 
-    final BytesRef rawBytes = new BytesRef();
     final CharArr textBuf = new CharArr();
 
     for (int i = 0, c = tokens.size(); i < c; i++) {
       AttributeSource token = tokens.get(i);
       final NamedList tokenNamedList = new SimpleOrderedMap();
 
-      token.getAttribute(TermToBytesRefAttribute.class).toBytesRef(rawBytes);
+      final TermToBytesRefAttribute termAtt = token.getAttribute(TermToBytesRefAttribute.class);
+      BytesRef rawBytes = termAtt.getBytesRef();
+      termAtt.fillBytesRef();
       textBuf.reset();
       fieldType.indexedToReadable(rawBytes, textBuf);
Index: solr/src/webapp/web/admin/analysis.jsp
===================================================================
--- solr/src/webapp/web/admin/analysis.jsp	(revision 1081496)
+++ solr/src/webapp/web/admin/analysis.jsp	(working copy)
@@ -156,10 +156,10 @@
       TermToBytesRefAttribute bytesAtt = tstream.getAttribute(TermToBytesRefAttribute.class);
       tstream.reset();
       matches = new HashSet<BytesRef>();
+      final BytesRef bytes = bytesAtt.getBytesRef();
       while (tstream.incrementToken()) {
-        final BytesRef bytes = new BytesRef();
-        bytesAtt.toBytesRef(bytes);
-        matches.add(bytes);
+        bytesAtt.fillBytesRef();
+        matches.add(new BytesRef(bytes));
       }
     }
 
@@ -273,14 +273,17 @@
   }
 
   private static class Tok {
-    final BytesRef bytes = new BytesRef();
+    final BytesRef bytes;
     final String rawText, text;
     final int pos;
     final List reflected = new ArrayList();
 
     Tok(AttributeSource token, int pos, FieldType ft) {
       this.pos = pos;
-      token.getAttribute(TermToBytesRefAttribute.class).toBytesRef(bytes);
+      TermToBytesRefAttribute termAtt = token.getAttribute(TermToBytesRefAttribute.class);
+      BytesRef spare = termAtt.getBytesRef();
+      termAtt.fillBytesRef();
+      bytes = new BytesRef(spare);
       rawText = (token.hasAttribute(CharTermAttribute.class)) ?
           token.getAttribute(CharTermAttribute.class).toString() : null;
       final CharArr textBuf = new CharArr(bytes.length);
Index: modules/analysis/icu/src/java/org/apache/lucene/collation/tokenattributes/ICUCollatedTermAttributeImpl.java
===================================================================
--- modules/analysis/icu/src/java/org/apache/lucene/collation/tokenattributes/ICUCollatedTermAttributeImpl.java	(revision 1081496)
+++ modules/analysis/icu/src/java/org/apache/lucene/collation/tokenattributes/ICUCollatedTermAttributeImpl.java	(working copy)
@@ -30,7 +30,7 @@
 public class ICUCollatedTermAttributeImpl extends CharTermAttributeImpl {
   private final Collator collator;
   private final RawCollationKey key = new RawCollationKey();
-  
+
   /**
    * Create a new ICUCollatedTermAttributeImpl
    * @param collator Collation key generator
@@ -43,13 +43,14 @@
       throw new RuntimeException(e);
     }
   }
-  
+
   @Override
-  public int toBytesRef(BytesRef target) {
+  public int fillBytesRef() {
+    BytesRef bytes = getBytesRef();
     collator.getRawCollationKey(toString(), key);
-    target.bytes = key.bytes;
-    target.offset = 0;
-    target.length = key.size;
-    return target.hashCode();
+    bytes.bytes = key.bytes;
+    bytes.offset = 0;
+    bytes.length = key.size;
+    return bytes.hashCode();
   }
 }
Index: modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java	(revision 1081496)
+++ modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java	(working copy)
@@ -283,7 +283,6 @@
     int numTestPoints = 100;
     int numThreads = _TestUtil.nextInt(random, 3, 5);
     final HashMap<String,BytesRef> map = new HashMap<String,BytesRef>();
-    BytesRef spare = new BytesRef();
 
     // create a map up front.
     // then with multiple threads, generate sort keys for all the keys in the map
@@ -292,12 +291,13 @@
     for (int i = 0; i < numTestPoints; i++) {
      String term = randomString();
      TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
-      TermToBytesRefAttribute bytes = ts.addAttribute(TermToBytesRefAttribute.class);
+      TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
+      BytesRef bytes = termAtt.getBytesRef();
      ts.reset();
      assertTrue(ts.incrementToken());
-      bytes.toBytesRef(spare);
+      termAtt.fillBytesRef();
      // ensure we make a copy of the actual bytes too
-      map.put(term, new BytesRef(spare));
+      map.put(term, new BytesRef(bytes));
     }
 
     Thread threads[] = new Thread[numThreads];
@@ -306,16 +306,16 @@
       @Override
       public void run() {
         try {
-          BytesRef spare = new BytesRef();
           for (Map.Entry<String,BytesRef> mapping : map.entrySet()) {
             String term = mapping.getKey();
             BytesRef expected = mapping.getValue();
             TokenStream ts = analyzer.reusableTokenStream("fake", new StringReader(term));
-            TermToBytesRefAttribute bytes = ts.addAttribute(TermToBytesRefAttribute.class);
+            TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
+            BytesRef bytes = termAtt.getBytesRef();
             ts.reset();
             assertTrue(ts.incrementToken());
-            bytes.toBytesRef(spare);
-            assertEquals(expected, spare);
+            termAtt.fillBytesRef();
+            assertEquals(expected, bytes);
           }
         } catch (IOException e) {
           throw new RuntimeException(e);
Index: modules/analysis/common/src/java/org/apache/lucene/collation/tokenattributes/CollatedTermAttributeImpl.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/collation/tokenattributes/CollatedTermAttributeImpl.java	(revision 1081496)
+++ modules/analysis/common/src/java/org/apache/lucene/collation/tokenattributes/CollatedTermAttributeImpl.java	(working copy)
@@ -40,11 +40,12 @@
   }
 
   @Override
-  public int toBytesRef(BytesRef target) {
-    target.bytes = collator.getCollationKey(toString()).toByteArray();
-    target.offset = 0;
-    target.length = target.bytes.length;
-    return target.hashCode();
+  public int fillBytesRef() {
+    BytesRef bytes = getBytesRef();
+    bytes.bytes = collator.getCollationKey(toString()).toByteArray();
+    bytes.offset = 0;
+    bytes.length = bytes.bytes.length;
+    return bytes.hashCode();
   }
 }
 
Index: lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
===================================================================
--- lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java	(revision 1081496)
+++ lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java	(working copy)
@@ -35,13 +35,13 @@
     final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
     final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
     final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
-    final BytesRef bytes = new BytesRef();
+    final BytesRef bytes = bytesAtt.getBytesRef();
     stream.reset();
     assertEquals(64, numericAtt.getValueSize());
     for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
       assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      final int hash = bytesAtt.toBytesRef(bytes);
+      final int hash = bytesAtt.fillBytesRef();
       assertEquals("Hash incorrect", bytes.hashCode(), hash);
       assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytes));
       assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
@@ -58,13 +58,13 @@
     final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
     final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
     final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
-    final BytesRef bytes = new BytesRef();
+    final BytesRef bytes = bytesAtt.getBytesRef();
     stream.reset();
     assertEquals(32, numericAtt.getValueSize());
     for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
       assertTrue("New token is available", stream.incrementToken());
       assertEquals("Shift value wrong", shift, numericAtt.getShift());
-      final int hash = bytesAtt.toBytesRef(bytes);
+      final int hash = bytesAtt.fillBytesRef();
       assertEquals("Hash incorrect", bytes.hashCode(), hash);
       assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytes));
       assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
Index: lucene/src/test/org/apache/lucene/index/Test2BTerms.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/Test2BTerms.java	(revision 1081496)
+++ lucene/src/test/org/apache/lucene/index/Test2BTerms.java	(working copy)
@@ -74,12 +74,14 @@
   }
 
   private final static class MyTermAttributeImpl extends AttributeImpl implements TermToBytesRefAttribute {
-    public int toBytesRef(BytesRef bs) {
-      bs.bytes = bytes.bytes;
-      bs.offset = bytes.offset;
-      bs.length = bytes.length;
+    public int fillBytesRef() {
       return bytes.hashCode();
     }
+
+    public BytesRef getBytesRef() {
+      return bytes;
+    }
+
     @Override
     public void clear() {
     }
Index: lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java
===================================================================
--- lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java	(working copy)
@@ -532,18 +532,19 @@
       // ignore
     }
 
+    BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
+
     if (numTokens == 0)
       return null;
     else if (numTokens == 1) {
-      BytesRef term = new BytesRef();
       try {
         boolean hasNext = buffer.incrementToken();
         assert hasNext == true;
-        termAtt.toBytesRef(term);
+        termAtt.fillBytesRef();
       } catch (IOException e) {
         // safe to ignore, because we know the number of tokens
       }
-      return newTermQuery(new Term(field, term));
+      return newTermQuery(new Term(field, new BytesRef(bytes)));
     } else {
       if (severalTokensAtSamePosition || (!quoted && !autoGeneratePhraseQueries)) {
         if (positionCount == 1 || (!quoted && !autoGeneratePhraseQueries)) {
@@ -554,17 +555,15 @@
               BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
 
           for (int i = 0; i < numTokens; i++) {
-            BytesRef term = new BytesRef();
             try {
               boolean hasNext = buffer.incrementToken();
               assert hasNext == true;
-              termAtt.toBytesRef(term);
+              termAtt.fillBytesRef();
             } catch (IOException e) {
               // safe to ignore, because we know the number of tokens
             }
 
-            Query currentQuery = newTermQuery(
-                new Term(field, term));
+            Query currentQuery = newTermQuery(new Term(field, new BytesRef(bytes)));
             q.add(currentQuery, occur);
           }
           return q;
@@ -576,12 +575,11 @@
           List<Term> multiTerms = new ArrayList<Term>();
           int position = -1;
           for (int i = 0; i < numTokens; i++) {
-            BytesRef term = new BytesRef();
             int positionIncrement = 1;
             try {
               boolean hasNext = buffer.incrementToken();
               assert hasNext == true;
-              termAtt.toBytesRef(term);
+              termAtt.fillBytesRef();
               if (posIncrAtt != null) {
                 positionIncrement = posIncrAtt.getPositionIncrement();
               }
@@ -598,7 +596,7 @@
                 multiTerms.clear();
               }
               position += positionIncrement;
-              multiTerms.add(new Term(field, term));
+              multiTerms.add(new Term(field, new BytesRef(bytes)));
             }
             if (enablePositionIncrements) {
               mpq.add(multiTerms.toArray(new Term[0]),position);
@@ -613,15 +611,13 @@
 
           pq.setSlop(phraseSlop);
           int position = -1;
-
           for (int i = 0; i < numTokens; i++) {
-            BytesRef term = new BytesRef();
             int positionIncrement = 1;
 
             try {
               boolean hasNext = buffer.incrementToken();
               assert hasNext == true;
-              termAtt.toBytesRef(term);
+              termAtt.fillBytesRef();
               if (posIncrAtt != null) {
                 positionIncrement = posIncrAtt.getPositionIncrement();
               }
@@ -631,9 +627,9 @@
 
             if (enablePositionIncrements) {
               position += positionIncrement;
-              pq.add(new Term(field, term),position);
+              pq.add(new Term(field, new BytesRef(bytes)),position);
             } else {
-              pq.add(new Term(field, term));
+              pq.add(new Term(field, new BytesRef(bytes)));
             }
           }
           return pq;
@@ -796,13 +792,13 @@
       source = analyzer.tokenStream(field, new StringReader(part));
     }
 
-    BytesRef result = new BytesRef();
     TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
-    
+    BytesRef bytes = termAtt.getBytesRef();
+
     try {
       if (!source.incrementToken())
         throw new IllegalArgumentException("analyzer returned no terms for range part: " + part);
-      termAtt.toBytesRef(result);
+      termAtt.fillBytesRef();
       if (source.incrementToken())
         throw new IllegalArgumentException("analyzer returned too many terms for range part: " + part);
     } catch (IOException e) {
@@ -812,8 +808,9 @@
     try {
       source.close();
     } catch (IOException ignored) {}
-    
-    return result;
+
+    // make a copy of the bytes, we might not "own" them and TermRangeQuery doesn't copy.
+    return new BytesRef(bytes);
   }
 
   /**
Index: lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java
===================================================================
--- lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java	(working copy)
@@ -142,8 +142,13 @@
   public static final class NumericTermAttributeImpl extends AttributeImpl implements NumericTermAttribute,TermToBytesRefAttribute {
     private long value = 0L;
     private int valueSize = 0, shift = 0, precisionStep = 0;
+    private BytesRef bytes = new BytesRef();
+
+    public BytesRef getBytesRef() {
+      return bytes;
+    }
 
-    public int toBytesRef(BytesRef bytes) {
+    public int fillBytesRef() {
       try {
         assert valueSize == 64 || valueSize == 32;
         return (valueSize == 64) ?
@@ -180,8 +185,7 @@
 
     @Override
     public void reflectWith(AttributeReflector reflector) {
-      final BytesRef bytes = new BytesRef();
-      toBytesRef(bytes);
+      fillBytesRef();
       reflector.reflect(TermToBytesRefAttribute.class, "bytes", bytes);
       reflector.reflect(NumericTermAttribute.class, "shift", shift);
       reflector.reflect(NumericTermAttribute.class, "rawValue", getRawValue());
Index: lucene/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java
===================================================================
--- lucene/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/analysis/tokenattributes/TermToBytesRefAttribute.java	(working copy)
@@ -22,18 +22,39 @@
 
 /**
  * This attribute is requested by TermsHashPerField to index the contents.
- * This attribute has no real state, it should be implemented in addition to
- * {@link CharTermAttribute}, to support indexing the term text as
- * UTF-8 bytes.
+ * This attribute can be used to customize the final byte[] encoding of terms.
+ * <p>
+ * Consumers of this attribute call {@link #getBytesRef()} up-front, and then
+ * invoke {@link #fillBytesRef()} for each term. Example:
+ * <pre>
+ *   final TermToBytesRefAttribute termAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
+ *   final BytesRef bytes = termAtt.getBytesRef();
+ *
+ *   while (tokenStream.incrementToken()) {
+ *
+ *     // you must call termAtt.fillBytesRef() before doing something with the bytes.
+ *     // this encodes the term value (internally it might be a char[], etc) into the bytes.
+ *     int hashCode = termAtt.fillBytesRef();
+ *
+ *     if (isInteresting(bytes)) {
+ *     
+ *       // because the bytes are reused by the attribute (like CharTermAttribute's char[] buffer),
+ *       // you should make a copy if you need persistent access to the bytes, otherwise they will
+ *       // be rewritten across calls to incrementToken()
+ *
+ *       doSomethingWith(new BytesRef(bytes));
+ *     }
+ *   }
+ *   ...
+ * </pre>
  * @lucene.experimental This is a very expert API, please use
  * {@link CharTermAttributeImpl} and its implementation of this method
  * for UTF-8 terms.
  */
 public interface TermToBytesRefAttribute extends Attribute {
-  /** Copies the token's term text into the given {@link BytesRef}.
-   * @param termBytes destination to write the bytes to (UTF-8 for text terms).
-   * The length of the BytesRef's buffer may be not large enough, so you need to grow.
-   * The parameters' {@code bytes} is guaranteed to be not {@code null}.
+  /**
+   * Updates the bytes {@link #getBytesRef()} to contain this term's
+   * final encoding, and returns its hashcode.
    * @return the hashcode as defined by {@link BytesRef#hashCode}:
    * <pre>
    *  int hash = 0;
@@ -45,5 +66,12 @@
    * the hash on-the-fly. If this is not the case, just return
    * {@code termBytes.hashCode()}.
    */
-  public int toBytesRef(BytesRef termBytes);
+  public int fillBytesRef();
+  
+  /**
+   * Retrieve this attribute's BytesRef. The bytes are updated 
+   * from the current term when the consumer calls {@link #fillBytesRef()}.
+   * @return this Attribute's internal BytesRef.
+   */
+  public BytesRef getBytesRef();
 }
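
For reviewers, the consumer side of the new contract reduces to the sketch below. It mirrors the call sites updated throughout this patch, but the collectTerms method and its parameters are illustrative only and not part of the change:

  import java.io.IOException;
  import java.io.StringReader;
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
  import org.apache.lucene.util.BytesRef;

  // Collects the encoded terms of one field under the new API.
  static List<BytesRef> collectTerms(Analyzer analyzer, String field, String text) throws IOException {
    List<BytesRef> result = new ArrayList<BytesRef>();
    TokenStream ts = analyzer.tokenStream(field, new StringReader(text));
    TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
    BytesRef bytes = termAtt.getBytesRef();  // fetched once, before iterating
    ts.reset();
    while (ts.incrementToken()) {
      termAtt.fillBytesRef();               // encodes the current term into 'bytes'
      result.add(new BytesRef(bytes));      // deep copy, since the attribute reuses 'bytes'
    }
    ts.close();
    return result;
  }

The inversion relative to the old API is the point: instead of handing a destination BytesRef into toBytesRef(), the consumer borrows the attribute's own BytesRef and copies it only when the bytes must outlive the next incrementToken() call.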
Index: lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/analysis/tokenattributes/CharTermAttributeImpl.java	(working copy)
@@ -77,9 +77,17 @@
   }
   
   // *** TermToBytesRefAttribute interface ***
-  public int toBytesRef(BytesRef target) {
-    return UnicodeUtil.UTF16toUTF8WithHash(termBuffer, 0, termLength, target);
+  private BytesRef bytes = new BytesRef(MIN_BUFFER_SIZE);
+
+  @Override
+  public int fillBytesRef() {
+    return UnicodeUtil.UTF16toUTF8WithHash(termBuffer, 0, termLength, bytes);
   }
+
+  @Override
+  public BytesRef getBytesRef() {
+    return bytes;
+  }
   
   // *** CharSequence interface ***
   public final int length() {
@@ -205,6 +213,7 @@
     // Do a deep clone
     t.termBuffer = new char[this.termLength];
     System.arraycopy(this.termBuffer, 0, t.termBuffer, 0, this.termLength);
+    t.bytes = new BytesRef(bytes);
     return t;
   }
   
@@ -246,8 +255,7 @@
   @Override
   public void reflectWith(AttributeReflector reflector) {
     reflector.reflect(CharTermAttribute.class, "term", toString());
-    final BytesRef bytes = new BytesRef();
-    toBytesRef(bytes);
+    fillBytesRef();
     reflector.reflect(TermToBytesRefAttribute.class, "bytes", bytes);
   }
   
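
On the implementation side, CharTermAttributeImpl above computes the hash while encoding (UTF16toUTF8WithHash). For a producer whose term bytes are already encoded, a minimal sketch follows; the class name and the setTerm() helper are hypothetical, and depending on the exact revision equals() and hashCode() may also need to be overridden:

  import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
  import org.apache.lucene.util.AttributeImpl;
  import org.apache.lucene.util.BytesRef;

  public class PreEncodedTermAttributeImpl extends AttributeImpl implements TermToBytesRefAttribute {
    private final BytesRef bytes = new BytesRef();

    // Hypothetical setter, called by the producing TokenStream for each token.
    public void setTerm(byte[] encoded) {
      bytes.bytes = encoded;
      bytes.offset = 0;
      bytes.length = encoded.length;
    }

    public int fillBytesRef() {
      // Nothing to re-encode, so just compute the hash on the fly,
      // using the exact loop documented on fillBytesRef():
      int hash = 0;
      for (int i = bytes.offset; i < bytes.offset + bytes.length; i++) {
        hash = 31 * hash + bytes.bytes[i];
      }
      return hash; // same value as bytes.hashCode()
    }

    public BytesRef getBytesRef() {
      return bytes;
    }

    @Override
    public void clear() {
      bytes.length = 0;
    }

    @Override
    public void copyTo(AttributeImpl target) {
      byte[] copy = new byte[bytes.length];
      System.arraycopy(bytes.bytes, bytes.offset, copy, 0, bytes.length);
      ((PreEncodedTermAttributeImpl) target).setTerm(copy);
    }
  }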
Index: lucene/src/java/org/apache/lucene/search/QueryTermVector.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/QueryTermVector.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/search/QueryTermVector.java	(working copy)
@@ -66,10 +66,10 @@
           final TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
 
           hasMoreTokens = stream.incrementToken();
+          BytesRef bytes = termAtt.getBytesRef();
           while (hasMoreTokens) {
-            BytesRef bytes = new BytesRef();
-            termAtt.toBytesRef(bytes);
-            terms.add(bytes);
+            termAtt.fillBytesRef();
+            terms.add(new BytesRef(bytes));
             hasMoreTokens = stream.incrementToken();
           }
           processTerms(terms.toArray(new BytesRef[terms.size()]));
Index: lucene/src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/TermsHashPerField.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/index/TermsHashPerField.java	(working copy)
@@ -39,6 +39,7 @@
   final DocumentsWriter.DocState docState;
   final FieldInvertState fieldState;
   TermToBytesRefAttribute termAtt;
+  BytesRef termBytesRef;
 
   // Copied from our perThread
   final IntBlockPool intPool;
@@ -53,7 +54,6 @@
   final BytesRefHash bytesHash;
  
   ParallelPostingsArray postingsArray;
-  private final BytesRef termBytesRef;
   private final AtomicLong bytesUsed;
 
   public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
@@ -70,7 +70,6 @@
     bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts); 
     streamCount = consumer.getStreamCount();
     numPostingInt = 2*streamCount;
-    termBytesRef = perThread.termBytesRef;
     this.fieldInfo = fieldInfo;
     if (nextPerThread != null)
       nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
@@ -119,6 +118,7 @@
   @Override
   void start(Fieldable f) {
     termAtt = fieldState.attributeSource.getAttribute(TermToBytesRefAttribute.class);
+    termBytesRef = termAtt.getBytesRef();
     consumer.start(f);
     if (nextPerField != null) {
       nextPerField.start(f);
@@ -181,7 +181,7 @@
     // Get the text & hash of this term.
     int termID;
     try{
-       termID = bytesHash.add(termBytesRef, termAtt.toBytesRef(termBytesRef));
+       termID = bytesHash.add(termBytesRef, termAtt.fillBytesRef());
     }catch (MaxBytesLengthExceededException e) {
       // Not enough room in current block
       // Just skip this term, to remain as robust as
Index: lucene/src/java/org/apache/lucene/index/TermsHashPerThread.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/TermsHashPerThread.java	(revision 1081496)
+++ lucene/src/java/org/apache/lucene/index/TermsHashPerThread.java	(working copy)
@@ -18,7 +18,6 @@
  */
 
 import org.apache.lucene.util.ByteBlockPool;
-import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
 
@@ -35,8 +34,6 @@
   
   final boolean primary;
   final DocumentsWriter.DocState docState;
-  // Used by perField to obtain terms from the analysis chain
-  final BytesRef termBytesRef = new BytesRef(10);
 
   public TermsHashPerThread(DocInverterPerThread docInverterPerThread, final TermsHash termsHash, final TermsHash nextTermsHash, final TermsHashPerThread primaryPerThread) {
     docState = docInverterPerThread.docState;
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java	(revision 1081496)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java	(working copy)
@@ -63,16 +63,16 @@
 		try
 		{
 			Term term = null;
+      BytesRef bytes = termAtt.getBytesRef();
 	      while (ts.incrementToken()) {
-	        BytesRef bytes = new BytesRef();
-	        termAtt.toBytesRef(bytes);
+	        termAtt.fillBytesRef();
 				if (term == null)
 				{
-					term = new Term(fieldName, bytes);
+					term = new Term(fieldName, new BytesRef(bytes));
 				} else
 				{
 //					 create from previous to save fieldName.intern overhead
-					term = term.createTerm(bytes); 
+					term = term.createTerm(new BytesRef(bytes)); 
 				}
 				tf.addTerm(term);
 			}
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java	(revision 1081496)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java	(working copy)
@@ -58,11 +58,10 @@
 			ArrayList<SpanQuery> clausesList=new ArrayList<SpanQuery>();
 			TokenStream ts=analyzer.tokenStream(fieldName,new StringReader(value));
 			TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
-			
+      BytesRef bytes = termAtt.getBytesRef();
 	    while (ts.incrementToken()) {
-	        BytesRef term = new BytesRef();
-	        termAtt.toBytesRef(term);
-			    SpanTermQuery stq=new SpanTermQuery(new Term(fieldName, term));
+	        termAtt.fillBytesRef();
+			    SpanTermQuery stq=new SpanTermQuery(new Term(fieldName, new BytesRef(bytes)));
 			    clausesList.add(stq);
 			}
 			SpanOrQuery soq=new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java	(revision 1081496)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java	(working copy)
@@ -60,16 +60,16 @@
 		{
 		  TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
 			Term term = null;
+      BytesRef bytes = termAtt.getBytesRef();
 			while (ts.incrementToken()) {
-        BytesRef bytes = new BytesRef();
-        termAtt.toBytesRef(bytes);
+        termAtt.fillBytesRef();
 				if (term == null)
 				{
-					term = new Term(fieldName, bytes);
+					term = new Term(fieldName, new BytesRef(bytes));
 				} else
 				{
 //					 create from previous to save fieldName.intern overhead
-					term = term.createTerm(bytes); 
+					term = term.createTerm(new BytesRef(bytes)); 
 				}
 				bq.add(new BooleanClause(new TermQuery(term),BooleanClause.Occur.SHOULD));
 			}
Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java	(revision 1081496)
+++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java	(working copy)
@@ -353,10 +353,10 @@
       TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
       PositionIncrementAttribute posIncrAttribute = stream.addAttribute(PositionIncrementAttribute.class);
       OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
-      BytesRef ref = new BytesRef(10);
+      BytesRef ref = termAtt.getBytesRef();
       stream.reset();
       while (stream.incrementToken()) {
-        termAtt.toBytesRef(ref);
+        termAtt.fillBytesRef();
         if (ref.length == 0) continue; // nothing to do
 //        if (DEBUG) System.err.println("token='" + term + "'");
         numTokens++;