diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/TestDemo.java
--- a/lucene/src/test/org/apache/lucene/TestDemo.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/TestDemo.java	Sat Sep 11 12:37:22 2010 -0400
@@ -57,7 +57,7 @@
     Document doc = new Document();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(new Field("fieldname", text, Field.Store.YES,
+    doc.add(newField("fieldname", text, Field.Store.YES,
         Field.Index.ANALYZED));
     iwriter.addDocument(doc);
     iwriter.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/TestExternalCodecs.java
--- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java	Sat Sep 11 12:37:22 2010 -0400
@@ -607,11 +607,11 @@
     w.setMergeFactor(3);
     Document doc = new Document();
     // uses default codec:
-    doc.add(new Field("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
     // uses pulsing codec:
-    doc.add(new Field("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED));
 
-    Field idField = new Field("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     for(int i=0;i0) {
       int k = i-1;
@@ -1196,7 +1196,7 @@
     ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
     for(int i=0;i<4;i++) {
       Document doc = new Document();
-      doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
       Map data = new HashMap();
       data.put("index", i+"");
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	Sat Sep 11 12:37:22 2010 -0400
@@ -148,18 +148,18 @@
     dir.close();
   }
 
-  private static void addDoc(IndexWriter writer) throws IOException
+  private void addDoc(IndexWriter writer) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
   }
 
   private void addDocWithIndex(IndexWriter writer, int index) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
   }
 
@@ -555,7 +555,7 @@
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
 
     for(int numDocs=38;numDocs<500;numDocs += 38) {
       LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -596,7 +596,7 @@
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
 
     LogDocMergePolicy ldmp = new LogDocMergePolicy();
     ldmp.setMinMergeDocs(1);
@@ -1137,12 +1137,12 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
     for(int j=0;j<100;j++) {
       Document doc = new Document();
-      doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
     writer.close();
@@ -1169,7 +1169,7 @@
     int lastNumFile = dir.listAll().length;
     for(int j=0;j<9;j++) {
       Document doc = new Document();
-      doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
       int numFile = dir.listAll().length;
       // Verify that with a tiny RAM buffer we see new
@@ -1198,7 +1198,7 @@
     int lastFlushCount = -1;
     for(int j=1;j<52;j++) {
       Document doc = new Document();
-      doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
       _TestUtil.syncConcurrentMerges(writer);
       int flushCount = writer.getFlushCount();
@@ -1255,7 +1255,7 @@
 
     for(int j=1;j<52;j++) {
       Document doc = new Document();
-      doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
 
@@ -1315,7 +1315,7 @@
     for(int j=0;j<100;j++) {
       Document doc = new Document();
       for(int k=0;k<100;k++) {
-        doc.add(new Field("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
       }
       writer.addDocument(doc);
     }
@@ -1324,7 +1324,7 @@
     // occurs (heavy on byte blocks)
     for(int j=0;j<100;j++) {
       Document doc = new Document();
-      doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
 
@@ -1339,7 +1339,7 @@
       String longTerm = b.toString();
 
       Document doc = new Document();
-      doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
   }
@@ -1359,7 +1359,7 @@
     // Enable norms for only 1 doc, pre flush
     for(int j=0;j<10;j++) {
       Document doc = new Document();
-      Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+      Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
       if (j != 8) {
         f.setOmitNorms(true);
       }
@@ -1380,7 +1380,7 @@
     // Enable norms for only 1 doc, post flush
     for(int j=0;j<27;j++) {
      Document doc = new Document();
-      Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+      Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
       if (j != 26) {
         f.setOmitNorms(true);
       }
@@ -1412,7 +1412,7 @@
       b.append(" a a a a a a a a");
     }
     Document doc = new Document();
-    doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.close();
@@ -1477,7 +1477,7 @@
         TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
     ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
     Document doc = new Document();
-    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<19;i++)
       writer.addDocument(doc);
     writer.flush(false, true, true);
@@ -1496,7 +1496,7 @@
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document doc = new Document();
-    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.commit();
     writer.addDocument(new Document());
@@ -1520,7 +1520,7 @@
         .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
     ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
     Document doc = new Document();
-    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<200;i++)
       writer.addDocument(doc);
     writer.optimize(false);
@@ -1573,7 +1573,7 @@
         TEST_VERSION_CURRENT, new MockAnalyzer()));
 
     Document document = new Document();
-    document.add(new Field("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
+    document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
     iw.addDocument(document);
     iw.close();
     dir.close();
@@ -1585,17 +1585,17 @@
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     document = new Document();
-    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
     iw.commit();
 
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
@@ -1612,13 +1612,13 @@
     IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     iw.commit();
 
     document = new Document();
-    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
@@ -1626,7 +1626,7 @@
     iw.optimize();
 
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
@@ -1648,7 +1648,7 @@
     ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
     IndexWriter iw = new IndexWriter(dir, conf);
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
     for(int i=0;i<4;i++)
@@ -1692,7 +1692,7 @@
     lmp.setMergeFactor(2);
     IndexWriter iw = new IndexWriter(dir, conf);
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     for(int i=0;i<177;i++)
       iw.addDocument(document);
@@ -1725,7 +1725,7 @@
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(new Field("content", contents, Field.Store.NO,
+    doc.add(newField("content", contents, Field.Store.NO,
         Field.Index.ANALYZED));
     try {
       writer.addDocument(doc);
@@ -1735,13 +1735,13 @@
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
         Field.Index.ANALYZED));
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
         Field.Index.ANALYZED));
     writer.addDocument(doc);
 
@@ -1813,7 +1813,7 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(new Field("content", contents, Field.Store.NO,
+    doc.add(newField("content", contents, Field.Store.NO,
         Field.Index.ANALYZED));
     boolean hitError = false;
     for(int i=0;i<200;i++) {
@@ -1869,13 +1869,13 @@
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
     //writer.setInfoStream(System.out);
     Document doc = new Document();
-    doc.add(new Field("contents", "here are some contents", Field.Store.YES,
+    doc.add(newField("contents", "here are some contents", Field.Store.YES,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.addDocument(doc);
-    doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
+    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
+    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     try {
       writer.addDocument(doc);
@@ -1885,7 +1885,7 @@
       if (0 == i) {
         doc = new Document();
-        doc.add(new Field("contents", "here are some contents", Field.Store.YES,
+        doc.add(newField("contents", "here are some contents", Field.Store.YES,
             Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         writer.addDocument(doc);
         writer.addDocument(doc);
@@ -1914,7 +1914,7 @@
     writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
     doc = new Document();
-    doc.add(new Field("contents", "here are some contents", Field.Store.YES,
+    doc.add(newField("contents", "here are some contents", Field.Store.YES,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int j=0;j<17;j++)
       writer.addDocument(doc);
@@ -1965,13 +1965,13 @@
     try {
       for(int iter=0;iter allTerms = new HashSet();
@@ -4799,7 +4799,7 @@
       s.append(' ').append(""+i);
     }
     Document d = new Document();
-    Field f = new Field("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
+    Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
     d.add(f);
     w.addDocument(d);
     IndexReader r = w.getReader(2).getSequentialSubReaders()[0];
@@ -4824,7 +4824,7 @@
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
     ((LogMergePolicy) w.getMergePolicy()).setUseCompoundFile(true);
     Document doc = new Document();
-    doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
     w.addDocument(doc);
     IndexReader r;
     if (iter == 0) {
@@ -4887,7 +4887,7 @@
     // First commit
     Document doc = new Document();
-    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(1, IndexReader.listCommits(dir).size());
@@ -4897,7 +4897,7 @@
     // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
     doc = new Document();
-    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(2, IndexReader.listCommits(dir).size());
@@ -4928,7 +4928,7 @@
     FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
     //w.setInfoStream(System.out);
     Document doc = new Document();
-    doc.add(new Field("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
     int num = 6 * RANDOM_MULTIPLIER;
     for (int iter = 0; iter < num; iter++) {
       int count = 0;
@@ -5004,13 +5004,13 @@
     Document doc = new Document();
     // create as many files as possible
-    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     // Adding just one document does not call flush yet.
     assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
 
     doc = new Document();
-    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
     // The second document should cause a flush.
     assertTrue("flush should have occurred and files created", dir.listAll().length > 5 + extraFileCount);
@@ -5034,7 +5034,7 @@
         TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
 
     Document doc = new Document();
-    doc.add(new Field("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     w.addDocument(doc);
     w.addDocument(doc);
     IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
@@ -5140,7 +5140,7 @@
     final List fieldIDs = new ArrayList();
 
-    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
 
     for(int i=0;i fields = new ArrayList();
     String idString = getIdString();
-    Field idField = new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+    Field idField = newField(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
     fields.add(idField);
 
     int nFields = nextInt(maxFields);
@@ -663,16 +669,16 @@
       switch (nextInt(4)) {
         case 0:
-          fields.add(new Field("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
+          fields.add(newField("f" + nextInt(100), getString(1), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, tvVal));
           break;
         case 1:
-          fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
+          fields.add(newField("f" + nextInt(100), getString(0), Field.Store.NO, Field.Index.ANALYZED, tvVal));
          break;
        case 2:
-          fields.add(new Field("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
+          fields.add(newField("f" + nextInt(100), getString(0), Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
          break;
        case 3:
-          fields.add(new Field("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
+          fields.add(newField("f" + nextInt(100), getString(bigFieldSize), Field.Store.YES, Field.Index.ANALYZED, tvVal));
          break;
       }
     }
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java
--- a/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java	Sat Sep 11 12:37:22 2010 -0400
@@ -69,7 +69,7 @@
     };
 
     Document doc = new Document();
-    doc.add(new Field(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(field,val, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
         .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java
--- a/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/TestThreadedOptimize.java	Sat Sep 11 12:37:22 2010 -0400
@@ -66,8 +66,8 @@
     for(int i=0;i<200;i++) {
       Document d = new Document();
-      d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
-      d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(d);
     }
 
@@ -87,8 +87,8 @@
           writerFinal.optimize(false);
           for(int k=0;k<17*(1+iFinal);k++) {
             Document d = new Document();
-            d.add(new Field("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
-            d.add(new Field("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
+            d.add(newField("id", iterFinal + "_" + iFinal + "_" + j + "_" + k, Field.Store.YES, Field.Index.NOT_ANALYZED));
+            d.add(newField("contents", English.intToEnglish(iFinal+k), Field.Store.NO, Field.Index.ANALYZED));
             writerFinal.addDocument(d);
           }
           for(int k=0;k<9*(1+iFinal);k++)
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/TestTransactionRollback.java
--- a/lucene/src/test/org/apache/lucene/index/TestTransactionRollback.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/TestTransactionRollback.java	Sat Sep 11 12:37:22 2010 -0400
@@ -130,7 +130,7 @@
     IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(sdp));
     for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
       Document doc=new Document();
-      doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
+      doc.add(newField(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
       w.addDocument(doc);
 
       if (currentRecordId%10 == 0) {
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/TestTransactions.java
--- a/lucene/src/test/org/apache/lucene/index/TestTransactions.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/TestTransactions.java	Sat Sep 11 12:37:22 2010 -0400
@@ -140,8 +140,8 @@
       for(int j=0; j<10; j++) {
         Document d = new Document();
         int n = random.nextInt();
-        d.add(new Field("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("id", Integer.toString(nextID++), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(d);
       }
 
@@ -185,7 +185,7 @@
       for(int j=0; j<7; j++) {
         Document d = new Document();
         int n = random.nextInt();
-        d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
+        d.add(newField("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(d);
       }
       writer.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java
--- a/lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java	Sat Sep 11 12:37:22 2010 -0400
@@ -297,7 +297,7 @@
         uniqueTerms.add(term);
         fieldTerms.add(new Term(field, term));
         Document doc = new Document();
-        doc.add(new Field(field, term, Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add(newField(field, term, Field.Store.NO, Field.Index.NOT_ANALYZED));
         w.addDocument(doc);
       }
       uniqueTermCount += uniqueTerms.size();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
--- a/lucene/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	Sat Sep 11 12:37:22 2010 -0400
@@ -284,7 +284,7 @@
     Directory ramDir = newDirectory();
     IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
     iw.addDocument(doc);
     iw.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java
--- a/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/queryParser/TestQueryParser.java	Sat Sep 11 12:37:22 2010 -0400
@@ -571,7 +571,7 @@
     Directory ramDir = newDirectory();
     IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
     Document doc = new Document();
-    doc.add(new Field("content","\u0633\u0627\u0628",
+    doc.add(newField("content","\u0633\u0627\u0628",
         Field.Store.YES, Field.Index.NOT_ANALYZED));
     iw.addDocument(doc);
     iw.close();
@@ -1131,13 +1131,13 @@
     assertEquals(expected, hits.length);
   }
 
-  private static void addDateDoc(String content, int year, int month,
+  private void addDateDoc(String content, int year, int month,
       int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
     Document d = new Document();
-    d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
+    d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
     Calendar cal = Calendar.getInstance(Locale.ENGLISH);
     cal.set(year, month-1, day, hour, minute, second);
-    d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    d.add(newField("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
     iw.addDocument(d);
   }
@@ -1155,7 +1155,7 @@
     Analyzer a = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
     Document doc = new Document();
-    doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
     w.addDocument(doc);
     IndexReader r = w.getReader();
     w.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
--- a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java	Sat Sep 11 12:37:22 2010 -0400
@@ -111,7 +111,7 @@
     for (int d = minId; d <= maxId; d++) {
       Document doc = new Document();
-      doc.add(new Field("id", pad(d), Field.Store.YES,
+      doc.add(newField("id", pad(d), Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       int r = index.allowNegativeRandomInts ? random.nextInt() : random
           .nextInt(Integer.MAX_VALUE);
@@ -121,9 +121,9 @@
       if (r < index.minR) {
         index.minR = r;
       }
-      doc.add(new Field("rand", pad(r), Field.Store.YES,
+      doc.add(newField("rand", pad(r), Field.Store.YES,
           Field.Index.NOT_ANALYZED));
-      doc.add(new Field("body", "body", Field.Store.YES,
+      doc.add(newField("body", "body", Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java	Sat Sep 11 12:37:22 2010 -0400
@@ -43,11 +43,11 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     Document doc = new Document();
-    Field titleField = new Field("title", "some title", Field.Store.NO,
+    Field titleField = newField("title", "some title", Field.Store.NO,
         Field.Index.ANALYZED);
-    Field field = new Field(FN, "this is document one 2345", Field.Store.NO,
+    Field field = newField(FN, "this is document one 2345", Field.Store.NO,
         Field.Index.ANALYZED);
-    Field footerField = new Field("footer", "a footer", Field.Store.NO,
+    Field footerField = newField("footer", "a footer", Field.Store.NO,
         Field.Index.ANALYZED);
     doc.add(titleField);
     doc.add(field);
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java	Sat Sep 11 12:37:22 2010 -0400
@@ -46,11 +46,11 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     Document doc = new Document();
-    Field titleField = new Field("title", "some title", Field.Store.NO,
+    Field titleField = newField("title", "some title", Field.Store.NO,
        Field.Index.ANALYZED);
-    Field field = new Field(FN, "", Field.Store.NO,
+    Field field = newField(FN, "", Field.Store.NO,
        Field.Index.ANALYZED);
-    Field footerField = new Field("footer", "a footer", Field.Store.NO,
+    Field footerField = newField("footer", "a footer", Field.Store.NO,
        Field.Index.ANALYZED);
     doc.add(titleField);
     doc.add(field);
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestBoolean2.java
--- a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java	Sat Sep 11 12:37:22 2010 -0400
@@ -54,7 +54,7 @@
     RandomIndexWriter writer= new RandomIndexWriter(random, directory);
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
-      doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
     writer.close();
@@ -77,12 +77,12 @@
     RandomIndexWriter w = new RandomIndexWriter(random, dir2);
     Document doc = new Document();
-    doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
     for(int i=0;i> docs = new ArrayList>();
     Document d = new Document();
-    Field f = new Field("f", "", Field.Store.NO, Field.Index.ANALYZED);
+    Field f = newField("f", "", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f);
 
     Random r = random;
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java
--- a/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java	Sat Sep 11 12:37:22 2010 -0400
@@ -93,7 +93,7 @@
     Directory store = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, store, analyzer);
     Document d = new Document();
-    d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.ANALYZED));
+    d.add(newField("field", "bogus", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(d);
     IndexReader reader = writer.getReader();
     writer.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java
--- a/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java	Sat Sep 11 12:37:22 2010 -0400
@@ -40,7 +40,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     for (int i = 0; i < categories.length; i++) {
       Document doc = new Document();
-      doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
--- a/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java	Sat Sep 11 12:37:22 2010 -0400
@@ -50,26 +50,26 @@
 
     for (int i = 0; i < 5137; ++i) {
       Document doc = new Document();
-      doc.add(new Field(FIELD, "meaninglessnames", Field.Store.YES,
+      doc.add(newField(FIELD, "meaninglessnames", Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     {
       Document doc = new Document();
-      doc.add(new Field(FIELD, "tangfulin", Field.Store.YES,
+      doc.add(newField(FIELD, "tangfulin", Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
 
     for (int i = 5138; i < 11377; ++i) {
       Document doc = new Document();
-      doc.add(new Field(FIELD, "meaninglessnames", Field.Store.YES,
+      doc.add(newField(FIELD, "meaninglessnames", Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     {
       Document doc = new Document();
-      doc.add(new Field(FIELD, "tangfulin", Field.Store.YES,
+      doc.add(newField(FIELD, "tangfulin", Field.Store.YES,
           Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java
--- a/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java	Sat Sep 11 12:37:22 2010 -0400
@@ -39,7 +39,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     for (int i = 0; i < categories.length; i++) {
       Document doc = new Document();
-      doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java
--- a/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java	Sat Sep 11 12:37:22 2010 -0400
@@ -48,7 +48,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, dir,
         new MockAnalyzer(MockTokenizer.KEYWORD, false));
     Document doc = new Document();
-    Field field = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
     doc.add(field);
 
     int num = 2000 * RANDOM_MULTIPLIER;
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
--- a/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java	Sat Sep 11 12:37:22 2010 -0400
@@ -34,7 +34,7 @@
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, dir);
     Document doc = new Document();
-    doc.add(new Field("field", "value", Store.NO, Index.ANALYZED));
+    doc.add(newField("field", "value", Store.NO, Index.ANALYZED));
     writer.addDocument(doc);
     IndexReader reader = writer.getReader();
     writer.close();
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
--- a/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java	Sat Sep 11 12:37:22 2010 -0400
@@ -47,7 +47,7 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     Document doc = new Document();
-    doc.add(new Field(FN,
+    doc.add(newField(FN,
         "the quick brown fox jumps over the lazy ??? dog 493432 49344",
         Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
--- a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java	Sat Sep 11 12:37:22 2010 -0400
@@ -47,7 +47,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, dir);
     Document doc = new Document();
-    Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
+    Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED);
     doc.add(field);
 
     NumberFormat df = new DecimalFormat("0000", new DecimalFormatSymbols(Locale.ENGLISH));
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java
--- a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java	Sat Sep 11 12:37:22 2010 -0400
@@ -56,7 +56,7 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, dir,
        new MockAnalyzer(MockTokenizer.KEYWORD, false));
     Document doc = new Document();
-    Field field = new Field("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
     doc.add(field);
     List terms = new ArrayList();
     int num = 2000 * RANDOM_MULTIPLIER;
diff -r 9526a98cc468 -r edde6f3b61a6 lucene/src/test/org/apache/lucene/search/TestScorerPerf.java
--- a/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java	Sat Sep 11 03:50:17 2010 +0000
+++ b/lucene/src/test/org/apache/lucene/search/TestScorerPerf.java	Sat Sep 11 12:37:22 2010 -0400
@@ -64,7 +64,7 @@
     Document d = new Document();
     for (int j=0; j