Index: solr/src/java/org/apache/solr/update/DirectUpdateHandler.java =================================================================== --- solr/src/java/org/apache/solr/update/DirectUpdateHandler.java (revision 957599) +++ solr/src/java/org/apache/solr/update/DirectUpdateHandler.java (working copy) @@ -118,7 +118,7 @@ DocsEnum tdocs = MultiFields.getTermDocsEnum(ir, MultiFields.getDeletedDocs(ir), idTerm.field(), - new BytesRef(idTerm.text())); + idTerm.bytes()); if (tdocs != null) { return tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS; } else { Index: solr/src/java/org/apache/solr/search/SolrIndexSearcher.java =================================================================== --- solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 957599) +++ solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (working copy) @@ -480,7 +480,7 @@ if (fields == null) return -1; Terms terms = fields.terms(t.field()); if (terms == null) return -1; - BytesRef termBytes = new BytesRef(t.text()); + BytesRef termBytes = t.bytes(); DocsEnum docs = terms.docs(MultiFields.getDeletedDocs(reader), termBytes, null); if (docs == null) return -1; int id = docs.nextDoc(); @@ -754,7 +754,7 @@ Fields fields = sir.fields(); Terms terms = fields.terms(t.field()); - BytesRef termBytes = new BytesRef(t.text()); + BytesRef termBytes = t.bytes(); Bits skipDocs = sir.getDeletedDocs(); DocsEnum docsEnum = terms==null ? 
null : terms.docs(skipDocs, termBytes, null); Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 957599) +++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy) @@ -464,7 +464,7 @@ private void verifyTermDocs(Directory dir, Term term, int numDocs) throws IOException { IndexReader reader = IndexReader.open(dir, true); - DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, term.field, new BytesRef(term.text)); + DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, null, term.field, term.bytes); int count = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) count++; Index: lucene/src/test/org/apache/lucene/index/TestPayloads.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestPayloads.java (revision 957599) +++ lucene/src/test/org/apache/lucene/index/TestPayloads.java (working copy) @@ -188,7 +188,7 @@ Term[] terms = generateTerms(fieldName, numTerms); StringBuilder sb = new StringBuilder(); for (int i = 0; i < terms.length; i++) { - sb.append(terms[i].text); + sb.append(terms[i].text()); sb.append(" "); } String content = sb.toString(); Index: lucene/src/test/org/apache/lucene/index/codecs/preflex/TermInfosWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/codecs/preflex/TermInfosWriter.java (revision 957599) +++ lucene/src/test/org/apache/lucene/index/codecs/preflex/TermInfosWriter.java (working copy) @@ -76,7 +76,6 @@ private int lastFieldNumber = -1; private TermInfosWriter other; - private BytesRef utf8Result = new BytesRef(10); TermInfosWriter(Directory directory, String segment, FieldInfos fis, int interval) @@ -106,8 +105,7 @@ } void add(Term term, TermInfo ti) throws IOException { - UnicodeUtil.UTF16toUTF8(term.text(), 
0, term.text().length(), utf8Result); - add(fieldInfos.fieldNumber(term.field()), utf8Result.bytes, utf8Result.length, ti); + add(fieldInfos.fieldNumber(term.field()), term.bytes().bytes, term.bytes().length, ti); } // Currently used only by assert statements Index: lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java =================================================================== --- lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (revision 957599) +++ lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (working copy) @@ -109,8 +109,15 @@ fieldInfos.write(dir, segName); // sorts in UTF16 order, just like preflex: - Collections.sort(terms); + Comparator utf16comparator = new Comparator() { + @Override + public int compare(Term o1, Term o2) { + return o1.compareToUTF16(o2); + } + }; + Collections.sort(terms, utf16comparator); + TermInfosWriter w = new TermInfosWriter(dir, segName, fieldInfos, 128); TermInfo ti = new TermInfo(); BytesRef utf8 = new BytesRef(10); Index: lucene/src/java/org/apache/lucene/search/PrefixTermsEnum.java =================================================================== --- lucene/src/java/org/apache/lucene/search/PrefixTermsEnum.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/PrefixTermsEnum.java (working copy) @@ -36,7 +36,7 @@ public PrefixTermsEnum(IndexReader reader, Term prefix) throws IOException { super(reader, prefix.field()); - setInitialSeekTerm(prefixRef = new BytesRef(prefix.text())); + setInitialSeekTerm(prefixRef = prefix.bytes()); } @Override Index: lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (working copy) @@ -85,16 +85,15 @@ public Spans getSpans(final IndexReader reader) throws 
IOException { // NOTE: debateably, the caller should never pass in a // multi reader... - final BytesRef textBytes = new BytesRef(term.text()); final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), - textBytes); + term.bytes()); if (postings != null) { return new TermSpans(postings, term); } else { - if (MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), textBytes) != null) { + if (MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), term.bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run SpanTermQuery (term=" + term.text() + ")"); } else { Index: lucene/src/java/org/apache/lucene/search/PrefixQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/PrefixQuery.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/PrefixQuery.java (working copy) @@ -46,7 +46,7 @@ @Override protected TermsEnum getTermsEnum(IndexReader reader) throws IOException { - if (prefix.text().length() == 0) { + if (prefix.bytes().length == 0) { // no prefix -- match all terms for this field: final Terms terms = MultiFields.getTerms(reader, getField()); return (terms != null) ? 
terms.iterator() : TermsEnum.EMPTY; Index: lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy) @@ -499,14 +499,13 @@ List docsEnums = new LinkedList(); final Bits delDocs = MultiFields.getDeletedDocs(indexReader); for (int i = 0; i < terms.length; i++) { - final BytesRef text = new BytesRef(terms[i].text()); DocsAndPositionsEnum postings = indexReader.termPositionsEnum(delDocs, terms[i].field(), - text); + terms[i].bytes()); if (postings != null) { docsEnums.add(postings); } else { - if (MultiFields.getTermDocsEnum(indexReader, delDocs, terms[i].field(), text) != null) { + if (MultiFields.getTermDocsEnum(indexReader, delDocs, terms[i].field(), terms[i].bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + terms[i].field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + terms[i].text() + ")"); } Index: lucene/src/java/org/apache/lucene/search/PhraseQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/PhraseQuery.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/PhraseQuery.java (working copy) @@ -184,15 +184,14 @@ final Bits delDocs = MultiFields.getDeletedDocs(reader); for (int i = 0; i < terms.size(); i++) { final Term t = terms.get(i); - final BytesRef text = new BytesRef(t.text()); DocsAndPositionsEnum postingsEnum = MultiFields.getTermPositionsEnum(reader, delDocs, t.field(), - text); + t.bytes()); // PhraseQuery on a field that did not index // positions. 
if (postingsEnum == null) { - if (MultiFields.getTermDocsEnum(reader, delDocs, t.field(), text) != null) { + if (MultiFields.getTermDocsEnum(reader, delDocs, t.field(), t.bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")"); } else { @@ -200,7 +199,7 @@ return null; } } - postingsFreqs[i] = new PostingsAndFreq(postingsEnum, reader.docFreq(t.field(), text), positions.get(i).intValue()); + postingsFreqs[i] = new PostingsAndFreq(postingsEnum, reader.docFreq(t.field(), t.bytes()), positions.get(i).intValue()); } // sort by increasing docFreq order Index: lucene/src/java/org/apache/lucene/search/TermQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/TermQuery.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/TermQuery.java (working copy) @@ -75,7 +75,7 @@ public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { // NOTE: debateably, the caller should never pass in a // multi reader... 
- DocsEnum docs = MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), new BytesRef(term.text())); + DocsEnum docs = MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), term.bytes()); if (docs == null) { return null; } @@ -118,7 +118,7 @@ Explanation tfExplanation = new Explanation(); int tf = 0; - DocsEnum docs = reader.termDocsEnum(MultiFields.getDeletedDocs(reader), term.field(), new BytesRef(term.text())); + DocsEnum docs = reader.termDocsEnum(MultiFields.getDeletedDocs(reader), term.field(), term.bytes()); if (docs != null) { int newDoc = docs.advance(doc); if (newDoc == doc) { Index: lucene/src/java/org/apache/lucene/search/SingleTermsEnum.java =================================================================== --- lucene/src/java/org/apache/lucene/search/SingleTermsEnum.java (revision 957599) +++ lucene/src/java/org/apache/lucene/search/SingleTermsEnum.java (working copy) @@ -41,7 +41,7 @@ */ public SingleTermsEnum(IndexReader reader, Term singleTerm) throws IOException { super(reader, singleTerm.field()); - singleRef = new BytesRef(singleTerm.text()); + singleRef = singleTerm.bytes(); setInitialSeekTerm(singleRef); } Index: lucene/src/java/org/apache/lucene/index/Term.java =================================================================== --- lucene/src/java/org/apache/lucene/index/Term.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/Term.java (working copy) @@ -17,6 +17,9 @@ * limitations under the License. */ +import java.util.Comparator; + +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; /** @@ -29,14 +32,26 @@ public final class Term implements Comparable, java.io.Serializable { String field; - String text; + BytesRef bytes; + /** Constructs a Term with the given field and bytes. + *

Note that a null field or null bytes value results in undefined + * behavior for most Lucene APIs that accept a Term parameter. + *

WARNING: the provided BytesRef is not copied, but used directly. + * Therefore the bytes should not be modified after construction, for + * example, you should clone a copy rather than pass reused bytes from + * a TermsEnum. + */ + public Term(String fld, BytesRef bytes) { + field = fld == null ? null : StringHelper.intern(fld); + this.bytes = bytes; + } + /** Constructs a Term with the given field and text. *

Note that a null field or null text value results in undefined * behavior for most Lucene APIs that accept a Term parameter. */ - public Term(String fld, String txt) { - field = fld == null ? null : StringHelper.intern(fld); - text = txt; + public Term(String fld, String text) { + this(fld, new BytesRef(text)); } /** Constructs a Term with the given field and empty text. @@ -46,15 +61,27 @@ * @param fld */ public Term(String fld) { - this(fld, "", true); + this(fld, BytesRef.EMPTY, true); } - /** @lucene.experimental */ - public Term(String fld, String txt, boolean intern) { + /** + * WARNING: the provided BytesRef is not copied, but used directly. + * Therefore the bytes should not be modified after construction, for + * example, you should clone a copy rather than pass reused bytes from + * a TermsEnum. + * + * @lucene.experimental + */ + public Term(String fld, BytesRef bytes, boolean intern) { field = intern ? StringHelper.intern(fld) : fld; // field names are interned - text = txt; // unless already known to be + this.bytes = bytes; // unless already known to be } + /** @lucene.experimental */ + public Term(String fld, String text, boolean intern) { + this(fld, new BytesRef(text), intern); + } + /** Returns the field of this term, an interned string. The field indicates the part of a document which this term came from. */ public final String field() { return field; } @@ -62,11 +89,29 @@ /** Returns the text of this term. In the case of words, this is simply the text of the word. In the case of dates and other types, this is an encoding of the object as a string. */ - public final String text() { return text; } - + public final String text() { return bytes.utf8ToString(); } + + /** Returns the bytes of this term. */ + public final BytesRef bytes() { return bytes; } + /** * Optimized construction of new Terms by reusing same field as this Term * - avoids field.intern() overhead + *

WARNING: the provided BytesRef is not copied, but used directly. + * Therefore the bytes should not be modified after construction, for + * example, you should clone a copy rather than pass reused bytes from + * a TermsEnum. + * @param bytes The bytes of the new term (field is implicitly same as this Term instance) + * @return A new Term + */ + public Term createTerm(BytesRef bytes) + { + return new Term(field,bytes,false); + } + + /** + * Optimized construction of new Terms by reusing same field as this Term + * - avoids field.intern() overhead * @param text The text of the new term (field is implicitly same as this Term instance) * @return A new Term */ @@ -89,10 +134,10 @@ return false; } else if (!field.equals(other.field)) return false; - if (text == null) { - if (other.text != null) + if (bytes == null) { + if (other.bytes != null) return false; - } else if (!text.equals(other.text)) + } else if (!bytes.equals(other.bytes)) return false; return true; } @@ -102,7 +147,7 @@ final int prime = 31; int result = 1; result = prime * result + ((field == null) ? 0 : field.hashCode()); - result = prime * result + ((text == null) ? 0 : text.hashCode()); + result = prime * result + ((bytes == null) ? 
0 : bytes.hashCode()); return result; } @@ -113,19 +158,47 @@ The ordering of terms is first by field, then by text.*/ public final int compareTo(Term other) { if (field == other.field) // fields are interned - return text.compareTo(other.text); + return bytes.compareTo(other.bytes); else return field.compareTo(other.field); } + + @Deprecated + private static final Comparator<BytesRef> legacyComparator = + BytesRef.getUTF8SortedAsUTF16Comparator(); + /** + * @deprecated For internal backwards compatibility use only + * @lucene.internal + */ + @Deprecated + public final int compareToUTF16(Term other) { + if (field == other.field) // fields are interned + return legacyComparator.compare(this.bytes, other.bytes); + else + return field.compareTo(other.field); + } + + /** + * Resets the field and text of a Term. + *

WARNING: the provided BytesRef is not copied, but used directly. + * Therefore the bytes should not be modified after construction, for + * example, you should clone a copy rather than pass reused bytes from + * a TermsEnum. + */ + final void set(String fld, BytesRef bytes) { + field = fld; + this.bytes = bytes; + } + /** Resets the field and text of a Term. */ final void set(String fld, String txt) { field = fld; - text = txt; + this.bytes = new BytesRef(txt); } @Override - public final String toString() { return field + ":" + text; } + public final String toString() { return field + ":" + bytes.utf8ToString(); } private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -883,7 +883,7 @@ public abstract Fields fields() throws IOException; public int docFreq(Term term) throws IOException { - return docFreq(term.field(), new BytesRef(term.text())); + return docFreq(term.field(), term.bytes()); } /** Returns the number of documents containing the term @@ -1000,7 +1000,7 @@ DocsEnum docs = MultiFields.getTermDocsEnum(this, MultiFields.getDeletedDocs(this), term.field(), - new BytesRef(term.text())); + term.bytes()); if (docs == null) return 0; int n = 0; int doc; Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java =================================================================== --- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java (working copy) @@ -1073,7 +1073,6 @@ TermsEnum termsEnum = null; String currentField = null; - BytesRef termRef = new BytesRef(); DocsEnum docs = null; for (Entry entry: 
deletesFlushed.terms.entrySet()) { @@ -1097,9 +1096,7 @@ } assert checkDeleteTerm(term); - termRef.copy(term.text()); - - if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) { + if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) { DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs); if (docsEnum != null) { @@ -1166,7 +1163,7 @@ num.setNum(docIDUpto); deletesInRAM.numTerms++; - deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.text.length()*CHAR_NUM_BYTE); + deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.bytes.length); } // Buffer a specific docID for deletion. Currently only Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java =================================================================== --- lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java (working copy) @@ -96,6 +96,7 @@ reset(); return; } + // TODO: use UnicodeUtil here? 
final String termText = term.text(); final int termLen = termText.length(); text.setLength(termLen); Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java (working copy) @@ -189,7 +189,7 @@ while (hi >= lo) { int mid = (lo + hi) >>> 1; - int delta = term.compareTo(indexTerms[mid]); + int delta = term.compareToUTF16(indexTerms[mid]); if (delta < 0) hi = mid - 1; else if (delta > 0) @@ -234,17 +234,17 @@ // optimize sequential access: first try scanning cached enum w/o seeking if (enumerator.term() != null // term is at or past current - && ((enumerator.prev() != null && term.compareTo(enumerator.prev())> 0) - || term.compareTo(enumerator.term()) >= 0)) { + && ((enumerator.prev() != null && term.compareToUTF16(enumerator.prev())> 0) + || term.compareToUTF16(enumerator.term()) >= 0)) { int enumOffset = (int)(enumerator.position/totalIndexInterval)+1; if (indexTerms.length == enumOffset // but before end of block - || term.compareTo(indexTerms[enumOffset]) < 0) { + || term.compareToUTF16(indexTerms[enumOffset]) < 0) { // no need to seek final TermInfo ti; int numScans = enumerator.scanTo(term); - if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) { + if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) { ti = enumerator.termInfo(); if (numScans > 1) { // we only want to put this TermInfo into the cache if @@ -279,7 +279,7 @@ seekEnum(enumerator, indexPos); enumerator.scanTo(term); final TermInfo ti; - if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) { + if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) { ti = enumerator.termInfo(); if (tiOrd == null) { termsCache.put(new CloneableTerm(term), new 
TermInfoAndOrd(ti, (int) enumerator.position)); @@ -328,9 +328,9 @@ SegmentTermEnum enumerator = getThreadResources().termEnum; seekEnum(enumerator, indexOffset); - while(term.compareTo(enumerator.term()) > 0 && enumerator.next()) {} + while(term.compareToUTF16(enumerator.term()) > 0 && enumerator.next()) {} - if (term.compareTo(enumerator.term()) == 0) + if (term.compareToUTF16(enumerator.term()) == 0) return enumerator.position; else return -1; Index: lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java =================================================================== --- lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (revision 957599) +++ lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (working copy) @@ -319,7 +319,8 @@ assert pendingPrefix != null; assert pendingPrefix.length > seekPrefix; pendingPrefix[seekPrefix] = UnicodeUtil.UNI_SUR_HIGH_START; - Term t2 = protoTerm.createTerm(new String(pendingPrefix, 0, 1+seekPrefix)); + pendingPrefix[1+seekPrefix] = UnicodeUtil.UNI_SUR_LOW_START; + Term t2 = protoTerm.createTerm(new String(pendingPrefix, 0, 2+seekPrefix)); if (DEBUG_SURROGATES) { System.out.println(" do pop; seek back to " + UnicodeUtil.toHexString(t2.text())); } @@ -366,6 +367,7 @@ if (t == null || t.field() != fieldInfo.name) { return false; } + // TODO: use unicodeutil here? 
final String text = t.text(); final int textLen = text.length(); @@ -385,17 +387,18 @@ // surrogate range; if so, we must first iterate // them, then seek back to the surrogates - char[] testPrefix = new char[i+1]; + char[] testPrefix = new char[i+2]; for(int j=0;j utf8SortedAsUTF16SortOrder = new UTF8SortedAsUTF16Comparator(); + + public static Comparator getUTF8SortedAsUTF16Comparator() { + return utf8SortedAsUTF16SortOrder; + } + + private static class UTF8SortedAsUTF16Comparator implements Comparator { + // Only singleton + private UTF8SortedAsUTF16Comparator() {}; + + public int compare(BytesRef a, BytesRef b) { + + final byte[] aBytes = a.bytes; + int aUpto = a.offset; + final byte[] bBytes = b.bytes; + int bUpto = b.offset; + + final int aStop; + if (a.length < b.length) { + aStop = aUpto + a.length; + } else { + aStop = aUpto + b.length; + } + + while(aUpto < aStop) { + int aByte = aBytes[aUpto++] & 0xff; + int bByte = bBytes[bUpto++] & 0xff; + + if (aByte != bByte) { + + // See http://icu-project.org/docs/papers/utf16_code_point_order.html#utf-8-in-utf-16-order + + // We know the terms are not equal, but, we may + // have to carefully fixup the bytes at the + // difference to match UTF16's sort order: + if (aByte >= 0xee && bByte >= 0xee) { + if ((aByte & 0xfe) == 0xee) { + aByte += 0x10; + } + if ((bByte&0xfe) == 0xee) { + bByte += 0x10; + } + } + return aByte - bByte; + } + } + + // One is a prefix of the other, or, they are equal: + return a.length - b.length; + } + + public boolean equals(Object other) { + return this == other; + } + } + public void writeExternal(ObjectOutput out) throws IOException {