Index: src/test/org/apache/lucene/TestDemo.java
===================================================================
--- src/test/org/apache/lucene/TestDemo.java	(revision 836056)
+++ src/test/org/apache/lucene/TestDemo.java	(working copy)
@@ -33,7 +33,6 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Version;
-import org.apache.lucene.util._TestUtil;
 
 /**
  * A very simple demo used in the API documentation (src/java/overview.html).
Index: src/test/org/apache/lucene/document/TestNumberTools.java
===================================================================
--- src/test/org/apache/lucene/document/TestNumberTools.java	(revision 836056)
+++ src/test/org/apache/lucene/document/TestNumberTools.java	(working copy)
@@ -29,7 +29,7 @@
     }
 
     public void testMax() {
-        // make sure the constants convert to their equivelents
+        // make sure the constants convert to their equivalents
         assertEquals(Long.MAX_VALUE, NumberTools
                 .stringToLong(NumberTools.MAX_STRING_VALUE));
         assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools
Index: src/test/org/apache/lucene/document/TestDateTools.java
===================================================================
--- src/test/org/apache/lucene/document/TestDateTools.java	(revision 836056)
+++ src/test/org/apache/lucene/document/TestDateTools.java	(working copy)
@@ -9,7 +9,6 @@
 import java.util.Locale;
 
 import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
Index: src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStopAnalyzer.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestStopAnalyzer.java	(working copy)
@@ -40,7 +40,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     
-    Iterator it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
+    Iterator<?> it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
     while(it.hasNext()) {
       inValidTokens.add(it.next());
     }
Index: src/test/org/apache/lucene/analysis/TestAnalyzers.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestAnalyzers.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestAnalyzers.java	(working copy)
@@ -24,7 +24,6 @@
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.util.Version;
 
Index: src/test/org/apache/lucene/analysis/TestStopFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStopFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestStopFilter.java	(working copy)
@@ -36,7 +36,7 @@
 
   public void testExactCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
-    Set<String> stopWords = new HashSet(Arrays.asList("is", "the", "Time"));
+    Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
     TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopWords, false);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -48,7 +48,7 @@
 
   public void testIgnoreCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
-    Set<String> stopWords = new HashSet(Arrays.asList( "is", "the", "Time" ));
+    Set<String> stopWords = new HashSet<String>(Arrays.asList( "is", "the", "Time" ));
     TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopWords, true);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -59,7 +59,7 @@
   public void testStopFilt() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     String[] stopWords = new String[] { "is", "the", "Time" };
-    Set stopSet = StopFilter.makeStopSet(stopWords);
+    Set<Object> stopSet = StopFilter.makeStopSet(stopWords);
     TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopSet);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -74,16 +74,16 @@
    */
   public void testStopPositons() throws IOException {
     StringBuilder sb = new StringBuilder();
-    ArrayList a = new ArrayList();
+    ArrayList<String> a = new ArrayList<String>();
     for (int i=0; i<20; i++) {
       String w = English.intToEnglish(i).trim();
       sb.append(w).append(" ");
       if (i%3 != 0) a.add(w);
     }
     log(sb.toString());
-    String stopWords[] = (String[]) a.toArray(new String[0]);
+    String stopWords[] = a.toArray(new String[0]);
     for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
-    Set stopSet = StopFilter.makeStopSet(stopWords);
+    Set<Object> stopSet = StopFilter.makeStopSet(stopWords);
     // with increments
     StringReader reader = new StringReader(sb.toString());
     StopFilter stpf = new StopFilter(false, new WhitespaceTokenizer(reader), stopSet);
@@ -93,8 +93,8 @@
     stpf = new StopFilter(false, new WhitespaceTokenizer(reader), stopSet);
     doTestStopPositons(stpf,false);
     // with increments, concatenating two stop filters
-    ArrayList a0 = new ArrayList();
-    ArrayList a1 = new ArrayList();
+    ArrayList<String> a0 = new ArrayList<String>();
+    ArrayList<String> a1 = new ArrayList<String>();
     for (int i=0; i<a.size(); i++) {
       if (i%2==0) { 
         a0.add(a.get(i));
@@ -102,12 +102,12 @@
         a1.add(a.get(i));
       }
     }
-    String stopWords0[] = (String[]) a0.toArray(new String[0]);
+    String stopWords0[] = a0.toArray(new String[0]);
     for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
-    String stopWords1[] = (String[]) a1.toArray(new String[0]);
+    String stopWords1[] = a1.toArray(new String[0]);
     for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
-    Set stopSet0 = StopFilter.makeStopSet(stopWords0);
-    Set stopSet1 = StopFilter.makeStopSet(stopWords1);
+    Set<Object> stopSet0 = StopFilter.makeStopSet(stopWords0);
+    Set<Object> stopSet1 = StopFilter.makeStopSet(stopWords1);
     reader = new StringReader(sb.toString());
     StopFilter stpf0 = new StopFilter(false, new WhitespaceTokenizer(reader), stopSet0); // first part of the set
     stpf0.setEnablePositionIncrements(true);
Index: src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
===================================================================
--- src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java	(working copy)
@@ -126,7 +126,7 @@
   }
 
   public static final AttributeImpl assertCopyIsEqual(AttributeImpl att) throws Exception {
-    AttributeImpl copy = (AttributeImpl) att.getClass().newInstance();
+    AttributeImpl copy = att.getClass().newInstance();
     att.copyTo(copy);
     assertEquals("Copied instance must be equal", att, copy);
     assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
Index: src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestMappingCharFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestMappingCharFilter.java	(working copy)
@@ -18,7 +18,6 @@
 package org.apache.lucene.analysis;
 
 import java.io.StringReader;
-import java.util.List;
 
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
Index: src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java	(working copy)
@@ -26,8 +26,7 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.List;
+
 
 /**
  * tests for the TestTeeSinkTokenFilter
Index: src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
===================================================================
--- src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java	(working copy)
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.util.Set;
 import java.io.StringReader;
 import java.io.IOException;
  
Index: src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java	(working copy)
@@ -1871,7 +1871,7 @@
     };
 
     // Construct input text and expected output tokens
-    List expectedOutputTokens = new ArrayList();
+    List<String> expectedOutputTokens = new ArrayList<String>();
     StringBuilder inputText = new StringBuilder();
     for (int n = 0 ; n < foldings.length ; n += 2) {
       if (n > 0) {
@@ -1892,9 +1892,9 @@
     TokenStream stream = new WhitespaceTokenizer(new StringReader(inputText.toString()));
     ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
-    Iterator expectedIter = expectedOutputTokens.iterator();
+    Iterator<String> expectedIter = expectedOutputTokens.iterator();
     while (expectedIter.hasNext()) {;
-      assertTermEquals((String)expectedIter.next(), filter, termAtt);
+      assertTermEquals(expectedIter.next(), filter, termAtt);
     }
     assertFalse(filter.incrementToken());
   }
Index: src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java	(revision 836056)
+++ src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java	(working copy)
@@ -1,13 +1,9 @@
 package org.apache.lucene.analysis;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+
 import org.apache.lucene.util.Version;
 
-import java.io.StringReader;
 
 /**
  * Copyright 2004 The Apache Software Foundation
Index: src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java
===================================================================
--- src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java	(working copy)
@@ -18,24 +18,7 @@
  */
 
 
-import org.apache.lucene.store.RAMDirectory;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
 
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.queryParser.ParseException;
-
-import junit.framework.TestCase;
-
-import java.util.Random;
-import java.util.BitSet;
 
 /**
  * subclass of TestSimpleExplanations that verifies non matches.
Index: src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java	(working copy)
@@ -21,7 +21,6 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
Index: src/test/org/apache/lucene/search/CheckHits.java
===================================================================
--- src/test/org/apache/lucene/search/CheckHits.java	(revision 836056)
+++ src/test/org/apache/lucene/search/CheckHits.java	(working copy)
@@ -45,7 +45,7 @@
     throws IOException {
 
     String d = q.toString(defaultFieldName);
-    Set ignore = new TreeSet();
+    Set<Integer> ignore = new TreeSet<Integer>();
     for (int i = 0; i < results.length; i++) {
       ignore.add(Integer.valueOf(results[i]));
     }
@@ -85,11 +85,11 @@
 
     QueryUtils.check(query,searcher);
     
-    Set correct = new TreeSet();
+    Set<Integer> correct = new TreeSet<Integer>();
     for (int i = 0; i < results.length; i++) {
       correct.add(Integer.valueOf(results[i]));
     }
-    final Set actual = new TreeSet();
+    final Set<Integer> actual = new TreeSet<Integer>();
     final Collector c = new SetCollector(actual);
 
     searcher.search(query, c);
@@ -117,8 +117,8 @@
   }
 
   public static class SetCollector extends Collector {
-    final Set bag;
-    public SetCollector(Set bag) {
+    final Set<Integer> bag;
+    public SetCollector(Set<Integer> bag) {
       this.bag = bag;
     }
     private int base = 0;
@@ -164,12 +164,12 @@
 
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
 
-    Set correct = new TreeSet();
+    Set<Integer> correct = new TreeSet<Integer>();
     for (int i = 0; i < results.length; i++) {
       correct.add(Integer.valueOf(results[i]));
     }
 
-    Set actual = new TreeSet();
+    Set<Integer> actual = new TreeSet<Integer>();
     for (int i = 0; i < hits.length; i++) {
       actual.add(Integer.valueOf(hits[i].doc));
     }
Index: src/test/org/apache/lucene/search/TestCustomSearcherSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestCustomSearcherSort.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestCustomSearcherSort.java	(working copy)
@@ -153,7 +153,7 @@
       // make a query without sorting first
     ScoreDoc[] hitsByRank = searcher.search(query, null, 1000).scoreDocs;
     checkHits(hitsByRank, "Sort by rank: "); // check for duplicates
-        Map resultMap = new TreeMap();
+        Map<Integer,Integer> resultMap = new TreeMap<Integer,Integer>();
         // store hits in TreeMap - TreeMap does not allow duplicates; existing entries are silently overwritten
         for(int hitid=0;hitid<hitsByRank.length; ++hitid) {
             resultMap.put(
@@ -190,7 +190,7 @@
    */
     private void checkHits(ScoreDoc[] hits, String prefix) {
         if(hits!=null) {
-            Map idMap = new TreeMap();
+            Map<Integer,Integer> idMap = new TreeMap<Integer,Integer>();
             for(int docnum=0;docnum<hits.length;++docnum) {
                 Integer luceneId = null;
 
@@ -200,7 +200,7 @@
                     message.append("Duplicate key for hit index = ");
                     message.append(docnum);
                     message.append(", previous index = ");
-                    message.append(((Integer)idMap.get(luceneId)).toString());
+                    message.append(idMap.get(luceneId).toString());
                     message.append(", Lucene ID = ");
                     message.append(luceneId);
                     log(message.toString());
Index: src/test/org/apache/lucene/search/TestTermRangeQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermRangeQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestTermRangeQuery.java	(working copy)
@@ -21,7 +21,6 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
Index: src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java	(working copy)
@@ -76,7 +76,7 @@
         query1.add(new Term("body", "blueberry"));
         query2.add(new Term("body", "strawberry"));
 
-        LinkedList termsWithPrefix = new LinkedList();
+        LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
         IndexReader ir = IndexReader.open(indexStore, true);
 
         // this TermEnum gives "piccadilly", "pie" and "pizza".
@@ -89,8 +89,8 @@
             }
         } while (te.next());
 
-        query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
-        query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
+        query1.add(termsWithPrefix.toArray(new Term[0]));
+        query2.add(termsWithPrefix.toArray(new Term[0]));
 
         ScoreDoc[] result;
         result = searcher.search(query1, null, 1000).scoreDocs;
Index: src/test/org/apache/lucene/search/TestElevationComparator.java
===================================================================
--- src/test/org/apache/lucene/search/TestElevationComparator.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestElevationComparator.java	(working copy)
@@ -30,7 +30,7 @@
 
 public class TestElevationComparator extends LuceneTestCase {
 
-  private final Map/*<String, Integer>*/ priority = new HashMap/*<String, Integer>*/();
+  private final Map<String,Integer> priority = new HashMap<String,Integer>();
 
   //@Test
   public void testSorting() throws Throwable {
@@ -126,9 +126,9 @@
 }
 
 class ElevationComparatorSource extends FieldComparatorSource {
-  private final Map/*<String, Integer>*/ priority;
+  private final Map<String,Integer> priority;
 
-  public ElevationComparatorSource(final Map/*<String, Integer>*/ boosts) {
+  public ElevationComparatorSource(final Map<String,Integer> boosts) {
    this.priority = boosts;
   }
 
@@ -152,7 +152,7 @@
 
      private int docVal(int doc) throws IOException {
        String id = idIndex.lookup[idIndex.order[doc]];
-       Integer prio = (Integer) priority.get(id);
+       Integer prio = priority.get(id);
        return prio == null ? 0 : prio.intValue();
      }
 
Index: src/test/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcher.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestMultiSearcher.java	(working copy)
@@ -253,9 +253,9 @@
     assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
     //Should be one document from each directory
     //they both have two fields, contents and other
-    Set ftl = new HashSet();
+    Set<String> ftl = new HashSet<String>();
     ftl.add("other");
-    SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
+    SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections.<String>emptySet());
     document = searcher.doc(hits[0].doc, fs);
     assertTrue("document is null and it shouldn't be", document != null);
     assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
@@ -265,7 +265,7 @@
     assertTrue("value is null and it shouldn't be", value != null);
     ftl.clear();
     ftl.add("contents");
-    fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
+    fs = new SetBasedFieldSelector(ftl, Collections.<String>emptySet());
     document = searcher.doc(hits[1].doc, fs);
     value = document.get("contents");
     assertTrue("value is null and it shouldn't be", value != null);    
Index: src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java
===================================================================
--- src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java	(working copy)
@@ -18,24 +18,6 @@
  */
 
 
-import org.apache.lucene.store.RAMDirectory;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.queryParser.ParseException;
-
-import junit.framework.TestCase;
-
-import java.util.Random;
-import java.util.BitSet;
 
 /**
  * subclass of TestSimpleExplanations that verifies non matches.
Index: src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
===================================================================
--- src/test/org/apache/lucene/search/TestTopScoreDocCollector.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestTopScoreDocCollector.java	(working copy)
@@ -59,7 +59,7 @@
     bq.setMinimumNumberShouldMatch(1);
     IndexSearcher searcher = new IndexSearcher(dir, true);
     for (int i = 0; i < inOrder.length; i++) {
-      TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]);
+      TopDocsCollector<ScoreDoc> tdc = TopScoreDocCollector.create(3, inOrder[i]);
       assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
       
       searcher.search(new MatchAllDocsQuery(), tdc);
Index: src/test/org/apache/lucene/search/TestThreadSafe.java
===================================================================
--- src/test/org/apache/lucene/search/TestThreadSafe.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestThreadSafe.java	(working copy)
@@ -85,9 +85,8 @@
                 }
               );
 
-      List fields = doc.getFields();
-      for (int i=0; i<fields.size(); i++) {
-        Fieldable f = (Fieldable)fields.get(i);
+      List<Fieldable> fields = doc.getFields();
+      for (final Fieldable f : fields ) {
         validateField(f);
       }
 
Index: src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(working copy)
@@ -18,8 +18,7 @@
  */
 
 import java.io.IOException;
-import java.text.Collator;
-import java.util.Locale;
+
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -66,7 +65,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
+    FieldCacheRangeFilter<String> fcrf;
     result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
     assertEquals("find all", numDocs, result.length);
@@ -213,7 +212,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
+    FieldCacheRangeFilter<Short> fcrf;
     result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
     assertEquals("find all", numDocs, result.length);
@@ -305,7 +304,7 @@
 
     // test id, bounded on both ends
         
-    FieldCacheRangeFilter fcrf;
+    FieldCacheRangeFilter<Integer> fcrf;
     result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
     assertEquals("find all", numDocs, result.length);
@@ -397,7 +396,7 @@
 
     // test id, bounded on both ends
         
-    FieldCacheRangeFilter fcrf;
+    FieldCacheRangeFilter<Long> fcrf;
     result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
     assertEquals("find all", numDocs, result.length);
@@ -550,7 +549,7 @@
     assertTrue(reader.hasDeletions());
 
     ScoreDoc[] result;
-    FieldCacheRangeFilter fcrf;
+    FieldCacheRangeFilter<Byte> fcrf;
     Query q = new TermQuery(new Term("body","body"));
 
     result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
Index: src/test/org/apache/lucene/search/QueryUtils.java
===================================================================
--- src/test/org/apache/lucene/search/QueryUtils.java	(revision 836056)
+++ src/test/org/apache/lucene/search/QueryUtils.java	(working copy)
@@ -352,7 +352,7 @@
 
         List<IndexReader> readerList = new ArrayList<IndexReader>();
         ReaderUtil.gatherSubReaders(readerList, s.getIndexReader());
-        IndexReader[] readers = (IndexReader[]) readerList.toArray(new IndexReader[0]);
+        IndexReader[] readers = readerList.toArray(new IndexReader[0]);
         for(int i = 0; i < readers.length; i++) {
           IndexReader reader = readers[i];
           Weight w = q.weight(s);
@@ -413,7 +413,7 @@
     
     List<IndexReader> readerList = new ArrayList<IndexReader>();
     ReaderUtil.gatherSubReaders(readerList, s.getIndexReader());
-    IndexReader[] readers = (IndexReader[]) readerList.toArray(new IndexReader[0]);
+    IndexReader[] readers = readerList.toArray(new IndexReader[0]);
     for(int i = 0; i < readers.length; i++) {
       IndexReader reader = readers[i];
       Weight w = q.weight(s);
Index: src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestSort.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestSort.java	(working copy)
@@ -634,9 +634,9 @@
   public void testNormalizedScores() throws Exception {
 
     // capture relevancy scores
-    HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
-    HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
-    HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
+    HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
+    HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
+    HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
 
     // we'll test searching locally, remote and multi
     
@@ -977,9 +977,9 @@
     assertEquals (expectedResult, buff.toString());
   }
 
-  private HashMap getScores (ScoreDoc[] hits, Searcher searcher)
+  private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
   throws IOException {
-    HashMap scoreMap = new HashMap();
+    HashMap<String,Float> scoreMap = new HashMap<String,Float>();
     int n = hits.length;
     for (int i=0; i<n; ++i) {
       Document doc = searcher.doc(hits[i].doc);
Index: src/test/org/apache/lucene/search/TestSpanQueryFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestSpanQueryFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestSpanQueryFilter.java	(working copy)
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-import java.util.Iterator;
 import java.util.List;
 
 import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -58,12 +57,11 @@
     DocIdSet docIdSet = result.getDocIdSet();
     assertTrue("docIdSet is null and it shouldn't be", docIdSet != null);
     assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, 10);
-    List spans = result.getPositions();
+    List<SpanFilterResult.PositionInfo> spans = result.getPositions();
     assertTrue("spans is null and it shouldn't be", spans != null);
     int size = getDocIdSetSize(docIdSet);
     assertTrue("spans Size: " + spans.size() + " is not: " + size, spans.size() == size);
-    for (Iterator iterator = spans.iterator(); iterator.hasNext();) {
-       SpanFilterResult.PositionInfo info = (SpanFilterResult.PositionInfo) iterator.next();
+    for (final SpanFilterResult.PositionInfo info: spans) {
       assertTrue("info is null and it shouldn't be", info != null);
       //The doc should indicate the bit is on
       assertContainsDocId("docIdSet doesn't contain docId " + info.getDoc(), docIdSet, info.getDoc());
Index: src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java	(working copy)
@@ -54,20 +54,20 @@
     ScoreDoc[] results;
     MatchAllDocsQuery q = new MatchAllDocsQuery();
 
-    List terms = new ArrayList();
+    List<String> terms = new ArrayList<String>();
     terms.add("5");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match nothing", 0, results.length);
 
-    terms = new ArrayList();
+    terms = new ArrayList<String>();
     terms.add("10");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match 1", 1, results.length);
 
-    terms = new ArrayList();
+    terms = new ArrayList<String>();
     terms.add("10");
     terms.add("20");
-    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, (String[]) terms.toArray(new String[0])), numDocs).scoreDocs;
+    results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs;
     assertEquals("Must match 2", 2, results.length);
 
     reader.close();
Index: src/test/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- src/test/org/apache/lucene/search/TestPositionIncrement.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestPositionIncrement.java	(working copy)
@@ -275,9 +275,9 @@
     Spans pspans = snq.getSpans(is.getIndexReader());
     while (pspans.next()) {
       //System.out.println(pspans.doc() + " - " + pspans.start() + " - "+ pspans.end());
-      Collection payloads = pspans.getPayload();
+      Collection<byte[]> payloads = pspans.getPayload();
       sawZero |= pspans.start() == 0;
-      for (Iterator it = payloads.iterator(); it.hasNext();) {
+      for (Iterator<byte[]> it = payloads.iterator(); it.hasNext();) {
         count++;
         it.next();
         //System.out.println(new String((byte[]) it.next()));
@@ -302,10 +302,10 @@
 
     sawZero = false;
     PayloadSpanUtil psu = new PayloadSpanUtil(is.getIndexReader());
-    Collection pls = psu.getPayloadsForQuery(snq);
+    Collection<byte[]> pls = psu.getPayloadsForQuery(snq);
     count = pls.size();
-    for (Iterator it = pls.iterator(); it.hasNext();) {
-      String s = new String((byte[]) it.next());
+    for (Iterator<byte[]> it = pls.iterator(); it.hasNext();) {
+      String s = new String(it.next());
       //System.out.println(s);
       sawZero |= s.equals("pos: 0");
     }
Index: src/test/org/apache/lucene/search/TestTermScorer.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermScorer.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestTermScorer.java	(working copy)
@@ -76,7 +76,7 @@
                                        indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
                                        indexReader.norms(FIELD));
         //we have 2 documents with the term all in them, one document for all the other values
-        final List docs = new ArrayList();
+        final List<TestHit> docs = new ArrayList<TestHit>();
         //must call next first
 
 
@@ -107,8 +107,8 @@
             }
         });
         assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
-        TestHit doc0 = (TestHit) docs.get(0);
-        TestHit doc5 = (TestHit) docs.get(1);
+        TestHit doc0 = docs.get(0);
+        TestHit doc5 = docs.get(1);
         //The scores should be the same
         assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
         /*
Index: src/test/org/apache/lucene/search/TestDocIdSet.java
===================================================================
--- src/test/org/apache/lucene/search/TestDocIdSet.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestDocIdSet.java	(working copy)
@@ -77,7 +77,7 @@
       };
 	  
     DocIdSetIterator iter = filteredSet.iterator();
-    ArrayList/*<Integer>*/ list = new ArrayList/*<Integer>*/();
+    ArrayList<Integer> list = new ArrayList<Integer>();
     int doc = iter.advance(3);
     if (doc != DocIdSetIterator.NO_MORE_DOCS) {
       list.add(Integer.valueOf(doc));
@@ -88,9 +88,9 @@
 	  
     int[] docs = new int[list.size()];
     int c=0;
-    Iterator/*<Integer>*/ intIter = list.iterator();
+    Iterator<Integer> intIter = list.iterator();
     while(intIter.hasNext()) {
-      docs[c++] = ((Integer) intIter.next()).intValue();
+      docs[c++] = intIter.next().intValue();
     }
     int[] answer = new int[]{4,6,8};
     boolean same = Arrays.equals(answer, docs);
Index: src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiPhraseQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestMultiPhraseQuery.java	(working copy)
@@ -31,7 +31,6 @@
 import org.apache.lucene.util.LuceneTestCase;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.Collections;
 
@@ -68,7 +67,7 @@
         query1.add(new Term("body", "blueberry"));
         query2.add(new Term("body", "strawberry"));
 
-        LinkedList termsWithPrefix = new LinkedList();
+        LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
         IndexReader ir = IndexReader.open(indexStore, true);
 
         // this TermEnum gives "piccadilly", "pie" and "pizza".
@@ -81,9 +80,9 @@
             }
         } while (te.next());
 
-        query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
+        query1.add(termsWithPrefix.toArray(new Term[0]));
         assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
-        query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
+        query2.add(termsWithPrefix.toArray(new Term[0]));
         assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
 
         ScoreDoc[] result;
@@ -103,7 +102,7 @@
                 termsWithPrefix.add(te.term());
             }
         } while (te.next());
-        query3.add((Term[])termsWithPrefix.toArray(new Term[0]));
+        query3.add(termsWithPrefix.toArray(new Term[0]));
         query3.add(new Term("body", "pizza"));
 
         result = searcher.search(query3, null, 1000).scoreDocs;
Index: src/test/org/apache/lucene/search/TestTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermVectors.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestTermVectors.java	(working copy)
@@ -28,7 +28,6 @@
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.SortedSet;
 
@@ -208,7 +207,7 @@
     String test2 = "computer in a computer lab"; //5 terms
     String test3 = "a chocolate lab grows old"; //5 terms
     String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
-    Map test4Map = new HashMap();
+    Map<String,Integer> test4Map = new HashMap<String,Integer>();
     test4Map.put("chocolate", Integer.valueOf(3));
     test4Map.put("lab", Integer.valueOf(2));
     test4Map.put("eating", Integer.valueOf(1));
@@ -304,21 +303,20 @@
         //System.out.println("Term: " + term);
         int freq = freqs[i];
         assertTrue(test4.indexOf(term) != -1);
-        Integer freqInt = (Integer)test4Map.get(term);
+        Integer freqInt = test4Map.get(term);
         assertTrue(freqInt != null);
         assertTrue(freqInt.intValue() == freq);        
       }
       SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
       knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
-      SortedSet vectorEntrySet = mapper.getTermVectorEntrySet();
+      SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
       assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
       TermVectorEntry last = null;
-      for (Iterator iterator = vectorEntrySet.iterator(); iterator.hasNext();) {
-         TermVectorEntry tve = (TermVectorEntry) iterator.next();
+      for (final TermVectorEntry tve : vectorEntrySet) {
         if (tve != null && last != null)
         {
           assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
-          Integer expectedFreq = (Integer) test4Map.get(tve.getTerm());
+          Integer expectedFreq = test4Map.get(tve.getTerm());
           //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
           assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
         }
@@ -328,9 +326,9 @@
 
       FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
       knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
-      Map map = fieldMapper.getFieldToTerms();
+      Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
       assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
-      vectorEntrySet = (SortedSet) map.get("field");
+      vectorEntrySet = map.get("field");
       assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
       assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
       knownSearcher.close();
Index: src/test/org/apache/lucene/search/function/TestDocValues.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestDocValues.java	(revision 836056)
+++ src/test/org/apache/lucene/search/function/TestDocValues.java	(working copy)
@@ -18,7 +18,6 @@
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import junit.framework.Assert;
 
 /**
  * DocValues TestCase  
Index: src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java	(working copy)
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.queryParser.QueryParser;
@@ -184,11 +183,11 @@
     TopDocs td5CustomMulAdd = s.search(q5CustomMulAdd,null,1000);
     
     // put results in map so we can verify the scores although they have changed
-    HashMap h1 = topDocsToMap(td1);
-    HashMap h2CustomNeutral = topDocsToMap(td2CustomNeutral);
-    HashMap h3CustomMul = topDocsToMap(td3CustomMul);
-    HashMap h4CustomAdd = topDocsToMap(td4CustomAdd);
-    HashMap h5CustomMulAdd = topDocsToMap(td5CustomMulAdd);
+    HashMap<Integer,Float> h1               = topDocsToMap(td1);
+    HashMap<Integer,Float> h2CustomNeutral  = topDocsToMap(td2CustomNeutral);
+    HashMap<Integer,Float> h3CustomMul      = topDocsToMap(td3CustomMul);
+    HashMap<Integer,Float> h4CustomAdd      = topDocsToMap(td4CustomAdd);
+    HashMap<Integer,Float> h5CustomMulAdd   = topDocsToMap(td5CustomMulAdd);
     
     verifyResults(boost, s, 
         h1, h2CustomNeutral, h3CustomMul, h4CustomAdd, h5CustomMulAdd,
@@ -197,7 +196,7 @@
   
   // verify results are as expected.
   private void verifyResults(float boost, IndexSearcher s, 
-      HashMap h1, HashMap h2customNeutral, HashMap h3CustomMul, HashMap h4CustomAdd, HashMap h5CustomMulAdd,
+      HashMap<Integer,Float> h1, HashMap<Integer,Float> h2customNeutral, HashMap<Integer,Float> h3CustomMul, HashMap<Integer,Float> h4CustomAdd, HashMap<Integer,Float> h5CustomMulAdd,
       Query q1, Query q2, Query q3, Query q4, Query q5) throws Exception {
     
     // verify numbers of matches
@@ -208,8 +207,7 @@
     assertEquals("queries should have same #hits",h1.size(),h5CustomMulAdd.size());
     
     // verify scores ratios
-    for (Iterator it = h1.keySet().iterator(); it.hasNext();) {
-      Integer x = (Integer) it.next();
+    for (final Integer x : h1.keySet()) {
 
       int doc =  x.intValue();
       log("doc = "+doc);
@@ -218,22 +216,22 @@
       log("fieldScore = "+fieldScore);
       assertTrue("fieldScore should not be 0",fieldScore>0);
 
-      float score1 = ((Float)h1.get(x)).floatValue();
+      float score1 = h1.get(x).floatValue();
       logResult("score1=", s, q1, doc, score1);
       
-      float score2 = ((Float)h2customNeutral.get(x)).floatValue();
+      float score2 = h2customNeutral.get(x).floatValue();
       logResult("score2=", s, q2, doc, score2);
       assertEquals("same score (just boosted) for neutral", boost * score1, score2, TEST_SCORE_TOLERANCE_DELTA);
 
-      float score3 = ((Float)h3CustomMul.get(x)).floatValue();
+      float score3 = h3CustomMul.get(x).floatValue();
       logResult("score3=", s, q3, doc, score3);
       assertEquals("new score for custom mul", boost * fieldScore * score1, score3, TEST_SCORE_TOLERANCE_DELTA);
       
-      float score4 = ((Float)h4CustomAdd.get(x)).floatValue();
+      float score4 = h4CustomAdd.get(x).floatValue();
       logResult("score4=", s, q4, doc, score4);
       assertEquals("new score for custom add", boost * (fieldScore + score1), score4, TEST_SCORE_TOLERANCE_DELTA);
       
-      float score5 = ((Float)h5CustomMulAdd.get(x)).floatValue();
+      float score5 = h5CustomMulAdd.get(x).floatValue();
       logResult("score5=", s, q5, doc, score5);
       assertEquals("new score for custom mul add", boost * fieldScore * (score1 + fieldScore), score5, TEST_SCORE_TOLERANCE_DELTA);
     }
@@ -248,8 +246,8 @@
 
   // since custom scoring modifies the order of docs, map results 
   // by doc ids so that we can later compare/verify them 
-  private HashMap topDocsToMap(TopDocs td) {
-    HashMap h = new HashMap(); 
+  private HashMap<Integer,Float> topDocsToMap(TopDocs td) {
+    HashMap<Integer,Float> h = new HashMap<Integer,Float>(); 
     for (int i=0; i<td.totalHits; i++) {
       h.put(Integer.valueOf(td.scoreDocs[i].doc), Float.valueOf(td.scoreDocs[i].score));
     }
Index: src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java	(revision 836056)
+++ src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java	(working copy)
@@ -168,7 +168,7 @@
 
     QueryUtils.checkEqual(q, qr);
 
-    HashSet set = new HashSet();
+    HashSet<Term> set = new HashSet<Term>();
     qr.extractTerms(set);
     assertEquals(2, set.size());
   }
Index: src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestPayloadSpans.java	(revision 836056)
+++ src/test/org/apache/lucene/search/spans/TestPayloadSpans.java	(working copy)
@@ -21,7 +21,6 @@
 import java.io.StringReader;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -271,13 +270,13 @@
     Spans spans = snq.getSpans(is.getIndexReader());
 
     TopDocs topDocs = is.search(snq, 1);
-    Set payloadSet = new HashSet();
+    Set<String> payloadSet = new HashSet<String>();
     for (int i = 0; i < topDocs.scoreDocs.length; i++) {
       while (spans.next()) {
-        Collection payloads = spans.getPayload();
+        Collection<byte[]> payloads = spans.getPayload();
 
-        for (Iterator it = payloads.iterator(); it.hasNext();) {
-          payloadSet.add(new String((byte[]) it.next()));
+        for (final byte[] payload : payloads) {
+          payloadSet.add(new String(payload));
         }
       }
     }
@@ -305,12 +304,12 @@
     Spans spans = snq.getSpans(is.getIndexReader());
 
     TopDocs topDocs = is.search(snq, 1);
-    Set payloadSet = new HashSet();
+    Set<String> payloadSet = new HashSet<String>();
     for (int i = 0; i < topDocs.scoreDocs.length; i++) {
       while (spans.next()) {
-        Collection payloads = spans.getPayload();
-        for (Iterator it = payloads.iterator(); it.hasNext();) {
-          payloadSet.add(new String((byte[]) it.next()));
+        Collection<byte[]> payloads = spans.getPayload();
+        for (final byte[] payload : payloads) {
+          payloadSet.add(new String(payload));
         }
       }
     }
@@ -338,22 +337,21 @@
     Spans spans = snq.getSpans(is.getIndexReader());
 
     TopDocs topDocs = is.search(snq, 1);
-    Set payloadSet = new HashSet();
+    Set<String> payloadSet = new HashSet<String>();
     for (int i = 0; i < topDocs.scoreDocs.length; i++) {
       while (spans.next()) {
-        Collection payloads = spans.getPayload();
+        Collection<byte[]> payloads = spans.getPayload();
 
-        for (Iterator it = payloads.iterator(); it.hasNext();) {
-          payloadSet.add(new String((byte[]) it.next()));
+        for (final byte[] payload : payloads) {
+          payloadSet.add(new String(payload));
         }
       }
     }
     assertEquals(2, payloadSet.size());
     if(DEBUG) {
-      Iterator pit = payloadSet.iterator();
-      while (pit.hasNext()) {
-        System.out.println("match:" + pit.next());
-      }
+      for (final String payload : payloadSet)
+        System.out.println("match:" + payload);
+
     }
     assertTrue(payloadSet.contains("a:Noise:10"));
     assertTrue(payloadSet.contains("k:Noise:11"));
@@ -375,12 +373,10 @@
     IndexReader reader = searcher.getIndexReader();
     PayloadSpanUtil psu = new PayloadSpanUtil(reader);
     
-    Collection payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
+    Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
     if(DEBUG)
       System.out.println("Num payloads:" + payloads.size());
-    Iterator it = payloads.iterator();
-    while(it.hasNext()) {
-      byte[] bytes = (byte[]) it.next();
+    for (final byte[] bytes : payloads) {
       if(DEBUG)
         System.out.println(new String(bytes));
     }
@@ -405,10 +401,9 @@
       }
       //See payload helper, for the PayloadHelper.FIELD field, there is a single byte payload at every token
       if (spans.isPayloadAvailable()) {
-        Collection payload = spans.getPayload();
+        Collection<byte[]> payload = spans.getPayload();
         assertTrue("payload Size: " + payload.size() + " is not: " + expectedNumPayloads, payload.size() == expectedNumPayloads);
-        for (Iterator iterator = payload.iterator(); iterator.hasNext();) {
-           byte[] thePayload = (byte[]) iterator.next();
+        for (final byte[] thePayload : payload) {
           assertTrue("payload[0] Size: " + thePayload.length + " is not: " + expectedPayloadLength,
                   thePayload.length == expectedPayloadLength);
           assertTrue(thePayload[0] + " does not equal: " + expectedFirstByte, thePayload[0] == expectedFirstByte);
@@ -450,12 +445,10 @@
       if(DEBUG)
         System.out.println("\nSpans Dump --");
       if (spans.isPayloadAvailable()) {
-        Collection payload = spans.getPayload();
+        Collection<byte[]> payload = spans.getPayload();
         if(DEBUG)
           System.out.println("payloads for span:" + payload.size());
-        Iterator it = payload.iterator();
-        while(it.hasNext()) {
-          byte[] bytes = (byte[]) it.next();
+        for (final byte[] bytes : payload) {
           if(DEBUG)
             System.out.println("doc:" + spans.doc() + " s:" + spans.start() + " e:" + spans.end() + " "
               + new String(bytes));
@@ -484,8 +477,8 @@
   class PayloadFilter extends TokenFilter {
     String fieldName;
     int numSeen = 0;
-    Set entities = new HashSet();
-    Set nopayload = new HashSet();
+    Set<String> entities = new HashSet<String>();
+    Set<String> nopayload = new HashSet<String>();
     int pos;
     PayloadAttribute payloadAtt;
     TermAttribute termAtt;
Index: src/test/org/apache/lucene/search/spans/TestSpanExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpanExplanations.java	(revision 836056)
+++ src/test/org/apache/lucene/search/spans/TestSpanExplanations.java	(working copy)
@@ -18,24 +18,7 @@
  */
 
 import org.apache.lucene.search.*;
-import org.apache.lucene.store.RAMDirectory;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
 
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.queryParser.ParseException;
-
-import junit.framework.TestCase;
-
-import java.util.Random;
-import java.util.BitSet;
 
 /**
  * TestExplanations subclass focusing on span queries
Index: src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java	(revision 836056)
+++ src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java	(working copy)
@@ -63,7 +63,7 @@
     }
 
     @Override
-    public Collection getPayload() throws IOException {
+    public Collection<byte[]> getPayload() throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
@@ -96,7 +96,7 @@
   static final class JustCompilePayloadSpans extends Spans {
 
     @Override
-    public Collection getPayload() throws IOException {
+    public Collection<byte[]> getPayload() throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
Index: src/test/org/apache/lucene/search/TestTopDocsCollector.java
===================================================================
--- src/test/org/apache/lucene/search/TestTopDocsCollector.java	(revision 836056)
+++ src/test/org/apache/lucene/search/TestTopDocsCollector.java	(working copy)
@@ -30,7 +30,7 @@
 
 public class TestTopDocsCollector extends LuceneTestCase {
 
-  private static final class MyTopsDocCollector extends TopDocsCollector {
+  private static final class MyTopsDocCollector extends TopDocsCollector<ScoreDoc> {
 
     private int idx = 0;
     private int base = 0;
@@ -50,7 +50,7 @@
         maxScore = results[0].score;
       } else {
         for (int i = pq.size(); i > 1; i--) { pq.pop(); }
-        maxScore = ((ScoreDoc) pq.pop()).score;
+        maxScore = pq.pop().score;
       }
       
       return new TopDocs(totalHits, results, maxScore);
@@ -94,10 +94,10 @@
   
   private Directory dir = new RAMDirectory();
 
-  private TopDocsCollector doSearch(int numResults) throws IOException {
+  private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
     Query q = new MatchAllDocsQuery();
     IndexSearcher searcher = new IndexSearcher(dir, true);
-    TopDocsCollector tdc = new MyTopsDocCollector(numResults);
+    TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
     searcher.search(q, tdc);
     searcher.close();
     return tdc;
@@ -125,7 +125,7 @@
   
   public void testInvalidArguments() throws Exception {
     int numResults = 5;
-    TopDocsCollector tdc = doSearch(numResults);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(numResults);
     
     // start < 0
     assertEquals(0, tdc.topDocs(-1).scoreDocs.length);
@@ -145,17 +145,17 @@
   }
   
   public void testZeroResults() throws Exception {
-    TopDocsCollector tdc = new MyTopsDocCollector(5);
+    TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(5);
     assertEquals(0, tdc.topDocs(0, 1).scoreDocs.length);
   }
   
   public void testFirstResultsPage() throws Exception {
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     assertEquals(10, tdc.topDocs(0, 10).scoreDocs.length);
   }
   
   public void testSecondResultsPages() throws Exception {
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     // ask for more results than are available
     assertEquals(5, tdc.topDocs(10, 10).scoreDocs.length);
     
@@ -169,12 +169,12 @@
   }
   
   public void testGetAllResults() throws Exception {
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     assertEquals(15, tdc.topDocs().scoreDocs.length);
   }
   
   public void testGetResultsFromStart() throws Exception {
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     // should bring all results
     assertEquals(15, tdc.topDocs(0).scoreDocs.length);
     
@@ -185,7 +185,7 @@
   
   public void testMaxScore() throws Exception {
     // ask for all results
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     TopDocs td = tdc.topDocs();
     assertEquals(MAX_SCORE, td.getMaxScore(), 0f);
     
@@ -198,7 +198,7 @@
   // This does not test the PQ's correctness, but whether topDocs()
   // implementations return the results in decreasing score order.
   public void testResultsOrder() throws Exception {
-    TopDocsCollector tdc = doSearch(15);
+    TopDocsCollector<ScoreDoc> tdc = doSearch(15);
     ScoreDoc[] sd = tdc.topDocs().scoreDocs;
     
     assertEquals(MAX_SCORE, sd[0].score, 0f);
Index: src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
===================================================================
--- src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java	(revision 836056)
+++ src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java	(working copy)
@@ -18,7 +18,6 @@
  */
 
 import java.io.IOException;
-import java.util.BitSet;
 import java.util.WeakHashMap;
 
 import junit.framework.TestCase;
@@ -46,11 +45,11 @@
   @Override
   public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
     if (cache == null) {
-      cache = new WeakHashMap();
+      cache = new WeakHashMap<IndexReader,DocIdSet>();
     }
     
     synchronized (cache) {  // check cache
-      DocIdSet cached = (DocIdSet) cache.get(reader);
+      DocIdSet cached = cache.get(reader);
       if (shouldHaveCache) {
         TestCase.assertNotNull("Cache should have data ", cached);
       } else {
Index: src/test/org/apache/lucene/TestSearchForDuplicates.java
===================================================================
--- src/test/org/apache/lucene/TestSearchForDuplicates.java	(revision 836056)
+++ src/test/org/apache/lucene/TestSearchForDuplicates.java	(working copy)
@@ -30,7 +30,6 @@
 import org.apache.lucene.util.Version;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
 
Index: src/test/org/apache/lucene/store/TestBufferedIndexInput.java
===================================================================
--- src/test/org/apache/lucene/store/TestBufferedIndexInput.java	(revision 836056)
+++ src/test/org/apache/lucene/store/TestBufferedIndexInput.java	(working copy)
@@ -292,7 +292,7 @@
 
     private static class MockFSDirectory extends Directory {
 
-      List allIndexInputs = new ArrayList();
+      List<IndexInput> allIndexInputs = new ArrayList<IndexInput>();
 
       Random rand;
 
@@ -310,10 +310,9 @@
       }
 
       public void tweakBufferSizes() {
-        Iterator it = allIndexInputs.iterator();
         //int count = 0;
-        while(it.hasNext()) {
-          BufferedIndexInput bii = (BufferedIndexInput) it.next();
+        for (final IndexInput ip : allIndexInputs) {
+          BufferedIndexInput bii = (BufferedIndexInput) ip;
           int bufferSize = 1024+(int) Math.abs(rand.nextInt() % 32768);
           bii.setBufferSize(bufferSize);
           //count++;
Index: src/test/org/apache/lucene/store/MockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMDirectory.java	(revision 836056)
+++ src/test/org/apache/lucene/store/MockRAMDirectory.java	(working copy)
@@ -18,7 +18,6 @@
  */
 
 import java.io.IOException;
-import java.io.File;
 import java.io.FileNotFoundException;
 import java.util.Iterator;
 import java.util.Random;
@@ -57,9 +56,9 @@
     if (openFiles == null)
       openFiles = new HashMap<String,Integer>();
     if (createdFiles == null)
-      createdFiles = new HashSet();
+      createdFiles = new HashSet<String>();
     if (unSyncedFiles == null)
-      unSyncedFiles = new HashSet();
+      unSyncedFiles = new HashSet<String>();
   }
 
   public MockRAMDirectory() {
@@ -90,9 +89,9 @@
    *  unsynced files. */
   public synchronized void crash() throws IOException {
     crashed = true;
-    openFiles = new HashMap();
+    openFiles = new HashMap<String,Integer>();
     Iterator<String> it = unSyncedFiles.iterator();
-    unSyncedFiles = new HashSet();
+    unSyncedFiles = new HashSet<String>();
     int count = 0;
     while(it.hasNext()) {
       String name = it.next();
@@ -230,7 +229,7 @@
       throw new FileNotFoundException(name);
     else {
       if (openFiles.containsKey(name)) {
-        Integer v = (Integer) openFiles.get(name);
+        Integer v = openFiles.get(name);
         v = Integer.valueOf(v.intValue()+1);
         openFiles.put(name, v);
       } else {
@@ -265,7 +264,7 @@
   @Override
   public synchronized void close() {
     if (openFiles == null) {
-      openFiles = new HashMap();
+      openFiles = new HashMap<String,Integer>();
     }
     if (noDeleteOpenFile && openFiles.size() > 0) {
       // RuntimeException instead of IOException because
@@ -312,7 +311,7 @@
     }
   }
 
-  ArrayList failures;
+  ArrayList<Failure> failures;
 
   /**
    * add a Failure object to the list of objects to be evaluated
@@ -320,7 +319,7 @@
    */
   synchronized public void failOn(Failure fail) {
     if (failures == null) {
-      failures = new ArrayList();
+      failures = new ArrayList<Failure>();
     }
     failures.add(fail);
   }
@@ -332,7 +331,7 @@
   synchronized void maybeThrowDeterministicException() throws IOException {
     if (failures != null) {
       for(int i = 0; i < failures.size(); i++) {
-        ((Failure)failures.get(i)).eval(this);
+        failures.get(i).eval(this);
       }
     }
   }
Index: src/test/org/apache/lucene/store/MockRAMInputStream.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMInputStream.java	(revision 836056)
+++ src/test/org/apache/lucene/store/MockRAMInputStream.java	(working copy)
@@ -45,7 +45,7 @@
     // all clones get closed:
     if (!isClone) {
       synchronized(dir) {
-        Integer v = (Integer) dir.openFiles.get(name);
+        Integer v = dir.openFiles.get(name);
         // Could be null when MockRAMDirectory.crash() was called
         if (v != null) {
           if (v.intValue() == 1) {
Index: src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestFileSwitchDirectory.java	(revision 836056)
+++ src/test/org/apache/lucene/store/TestFileSwitchDirectory.java	(working copy)
@@ -33,7 +33,7 @@
    * @throws IOException
    */
   public void testBasic() throws IOException {
-    Set fileExtensions = new HashSet();
+    Set<String> fileExtensions = new HashSet<String>();
     fileExtensions.add("fdt");
     fileExtensions.add("fdx");
     
Index: src/test/org/apache/lucene/store/TestHugeRamFile.java
===================================================================
--- src/test/org/apache/lucene/store/TestHugeRamFile.java	(revision 836056)
+++ src/test/org/apache/lucene/store/TestHugeRamFile.java	(working copy)
@@ -31,13 +31,13 @@
    * buffers under maxint. */
   private static class DenseRAMFile extends RAMFile {
     private long capacity = 0;
-    private HashMap singleBuffers = new HashMap();
+    private HashMap<Integer,byte[]> singleBuffers = new HashMap<Integer,byte[]>();
     @Override
     byte[] newBuffer(int size) {
       capacity += size;
       if (capacity <= MAX_VALUE) {
         // below maxint we reuse buffers
-        byte buf[] = (byte[]) singleBuffers.get(Integer.valueOf(size));
+        byte buf[] = singleBuffers.get(Integer.valueOf(size));
         if (buf==null) {
           buf = new byte[size]; 
           //System.out.println("allocate: "+size);
Index: src/test/org/apache/lucene/store/TestLockFactory.java
===================================================================
--- src/test/org/apache/lucene/store/TestLockFactory.java	(revision 836056)
+++ src/test/org/apache/lucene/store/TestLockFactory.java	(working copy)
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -63,8 +62,7 @@
         assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
                    lf.makeLockCount >= 1);
         
-        for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
-            String lockName = (String) e.next();
+        for(final String lockName : lf.locksCreated.keySet()) {
             MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
             assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
                        lock.lockAttempts > 0);
@@ -341,7 +339,7 @@
     public class MockLockFactory extends LockFactory {
 
         public boolean lockPrefixSet;
-        public Map locksCreated = Collections.synchronizedMap(new HashMap());
+        public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
         public int makeLockCount = 0;
 
         @Override
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	(revision 836056)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java	(working copy)
@@ -130,7 +130,7 @@
   }
   
   public void testBoostsSimple() throws Exception {
-      Map boosts = new HashMap();
+      Map<String,Float> boosts = new HashMap<String,Float>();
       boosts.put("b", Float.valueOf(5));
       boosts.put("t", Float.valueOf(10));
       String[] fields = {"b", "t"};
Index: src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java	(revision 836056)
+++ src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java	(working copy)
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
 import java.io.Reader;
 
 import org.apache.lucene.analysis.Analyzer;
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java	(revision 836056)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java	(working copy)
@@ -72,7 +72,7 @@
 public class TestQueryParser extends LocalizedTestCase {
 
   public TestQueryParser(String name) {
-    super(name, new HashSet(Arrays.asList(
+    super(name, new HashSet<String>(Arrays.asList(
       "testLegacyDateRange", "testDateRange",
       "testCJK", "testNumber", "testFarsiRangeCollating",
       "testLocalDateFormat"
Index: src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java	(revision 836056)
+++ src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java	(working copy)
@@ -19,7 +19,6 @@
  * limitations under the License.
  */
 
-import java.util.Iterator;
 import java.util.Collection;
 import java.io.File;
 import java.io.IOException;
@@ -79,7 +78,7 @@
         writer.commit();
       }
     }
-    IndexCommit cp = (IndexCommit) dp.snapshot();
+    IndexCommit cp = dp.snapshot();
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
@@ -182,7 +181,7 @@
   public void backupIndex(Directory dir, SnapshotDeletionPolicy dp) throws Exception {
     // To backup an index we first take a snapshot:
     try {
-      copyFiles(dir, (IndexCommit) dp.snapshot());
+      copyFiles(dir, dp.snapshot());
     } finally {
       // Make sure to release the snapshot, otherwise these
       // files will never be deleted during this IndexWriter
@@ -196,10 +195,8 @@
     // While we hold the snapshot, and nomatter how long
     // we take to do the backup, the IndexWriter will
     // never delete the files in the snapshot:
-    Collection files = cp.getFileNames();
-    Iterator it = files.iterator();
-    while(it.hasNext()) {
-      final String fileName = (String) it.next();
+    Collection<String> files = cp.getFileNames();
+    for (final String fileName : files) {
       // NOTE: in a real backup you would not use
       // readFile; you would need to use something else
       // that copies the file to a backup location.  This
Index: src/test/org/apache/lucene/index/TestTransactionRollback.java
===================================================================
--- src/test/org/apache/lucene/index/TestTransactionRollback.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestTransactionRollback.java	(working copy)
@@ -54,12 +54,12 @@
     // System.out.println("Attempting to rollback to "+id);
     String ids="-"+id;
     IndexCommit last=null;
-    Collection commits = IndexReader.listCommits(dir);
-    for (Iterator iterator = commits.iterator(); iterator.hasNext();) {
-      IndexCommit commit = (IndexCommit) iterator.next();
-      Map ud=commit.getUserData();
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (Iterator<IndexCommit> iterator = commits.iterator(); iterator.hasNext();) {
+      IndexCommit commit = iterator.next();
+      Map<String,String> ud=commit.getUserData();
       if (ud.size() > 0)
-        if (((String) ud.get("index")).endsWith(ids))
+        if (ud.get("index").endsWith(ids))
           last=commit;
     }
 
@@ -68,7 +68,7 @@
 		
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(),
                                     new RollbackDeletionPolicy(id), MaxFieldLength.UNLIMITED, last);
-    Map data = new HashMap();
+    Map<String,String> data = new HashMap<String,String>();
     data.put("index", "Rolled back to 1-"+id);
     w.commit(data);
     w.close();
@@ -135,7 +135,7 @@
       w.addDocument(doc);
 			
       if (currentRecordId%10 == 0) {
-        Map data = new HashMap();
+        Map<String,String> data = new HashMap<String,String>();
         data.put("index", "records 1-"+currentRecordId);
         w.commit(data);
       }
@@ -152,18 +152,17 @@
       this.rollbackPoint = rollbackPoint;
     }
 
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
     }
 
-    public void onInit(List commits) throws IOException {
-      for (Iterator iterator = commits.iterator(); iterator.hasNext();) {
-        IndexCommit commit = (IndexCommit) iterator.next();
-        Map userData=commit.getUserData();
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      for (final IndexCommit commit : commits) {
+        Map<String,String> userData=commit.getUserData();
         if (userData.size() > 0) {
           // Label for a commit point is "Records 1-30"
           // This code reads the last id ("30" in this example) and deletes it
           // if it is after the desired rollback point
-          String x = (String) userData.get("index");
+          String x = userData.get("index");
           String lastVal = x.substring(x.lastIndexOf("-")+1);
           int last = Integer.parseInt(lastVal);
           if (last>rollbackPoint) {
@@ -186,10 +185,10 @@
 
   class DeleteLastCommitPolicy implements IndexDeletionPolicy {
 
-    public void onCommit(List commits) throws IOException {}
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
 
-    public void onInit(List commits) throws IOException {
-      ((IndexCommit) commits.get(commits.size()-1)).delete();
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
+      commits.get(commits.size()-1).delete();
     }
   }
 
@@ -208,7 +207,7 @@
 	
   // Keeps all commit points (used to build index)
   class KeepAllDeletionPolicy implements IndexDeletionPolicy {
-    public void onCommit(List commits) throws IOException {}
-    public void onInit(List commits) throws IOException {}
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {}
   }
 }
Index: src/test/org/apache/lucene/index/DocHelper.java
===================================================================
--- src/test/org/apache/lucene/index/DocHelper.java	(revision 836056)
+++ src/test/org/apache/lucene/index/DocHelper.java	(working copy)
@@ -109,7 +109,7 @@
   
   
   
-  public static Map nameValues = null;
+  public static Map<String,Object> nameValues = null;
 
   // ordered list of all the fields...
   // could use LinkedHashMap for this purpose if Java1.4 is OK
@@ -130,17 +130,16 @@
     largeLazyField//placeholder for large field, since this is null.  It must always be last
   };
 
-  // Map<String fieldName, Fieldable field>
-  public static Map all=new HashMap();
-  public static Map indexed=new HashMap();
-  public static Map stored=new HashMap();
-  public static Map unstored=new HashMap();
-  public static Map unindexed=new HashMap();
-  public static Map termvector=new HashMap();
-  public static Map notermvector=new HashMap();
-  public static Map lazy= new HashMap();
-  public static Map noNorms=new HashMap();
-  public static Map noTf=new HashMap();
+  public static Map<String,Fieldable> all     =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> indexed =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> stored  =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unstored=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unindexed=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> termvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> notermvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> lazy= new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noNorms=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noTf=new HashMap<String,Fieldable>();
 
   static {
     //Initialize the large Lazy Field
@@ -175,14 +174,14 @@
   }
 
 
-  private static void add(Map map, Fieldable field) {
+  private static void add(Map<String,Fieldable> map, Fieldable field) {
     map.put(field.name(), field);
   }
 
 
   static
   {
-    nameValues = new HashMap();
+    nameValues = new HashMap<String,Object>();
     nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
     nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
     nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT);
Index: src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- src/test/org/apache/lucene/index/TestBackwardsCompatibility.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestBackwardsCompatibility.java	(working copy)
@@ -286,7 +286,7 @@
     for(int i=0;i<35;i++) {
       if (!reader.isDeleted(i)) {
         Document d = reader.document(i);
-        List fields = d.getFields();
+        List<Fieldable> fields = d.getFields();
         if (!oldName.startsWith("19.") &&
             !oldName.startsWith("20.") &&
             !oldName.startsWith("21.") &&
@@ -295,19 +295,19 @@
           if (d.getField("content3") == null) {
             final int numFields = oldName.startsWith("29.") ? 7 : 5;
             assertEquals(numFields, fields.size());
-            Field f = (Field) d.getField("id");
+            Field f = d.getField("id");
             assertEquals(""+i, f.stringValue());
 
-            f = (Field) d.getField("utf8");
+            f = d.getField("utf8");
             assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
 
-            f = (Field) d.getField("autf8");
+            f = d.getField("autf8");
             assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
         
-            f = (Field) d.getField("content2");
+            f = d.getField("content2");
             assertEquals("here is more content with aaa aaa aaa", f.stringValue());
 
-            f = (Field) d.getField("fie\u2C77ld");
+            f = d.getField("fie\u2C77ld");
             assertEquals("field with non-ascii name", f.stringValue());
           }
         }       
Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(working copy)
@@ -20,9 +20,10 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Iterator;
+
 import java.util.List;
 import java.util.Random;
 import java.util.Map;
@@ -732,13 +733,13 @@
       }      
     };
     
-    final List readers = Collections.synchronizedList(new ArrayList());
+    final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
     IndexReader firstReader = IndexReader.open(dir, false);
     IndexReader reader = firstReader;
     final Random rnd = newRandom();
     
     ReaderThread[] threads = new ReaderThread[n];
-    final Set readersToClose = Collections.synchronizedSet(new HashSet());
+    final Set<IndexReader> readersToClose = Collections.synchronizedSet(new HashSet<IndexReader>());
     
     for (int i = 0; i < n; i++) {
       if (i % 10 == 0) {
@@ -806,7 +807,7 @@
             while (!stopped) {
               int numReaders = readers.size();
               if (numReaders > 0) {
-                ReaderCouple c = (ReaderCouple) readers.get(rnd.nextInt(numReaders));
+                ReaderCouple c = readers.get(rnd.nextInt(numReaders));
                 TestIndexReader.assertIndexEquals(c.newReader, c.refreshedReader);
               }
               
@@ -845,17 +846,15 @@
       
     }
     
-    Iterator it = readersToClose.iterator();
-    while (it.hasNext()) {
-      ((IndexReader) it.next()).close();
+    for (final IndexReader readerToClose : readersToClose) {
+      readerToClose.close();
     }
     
     firstReader.close();
     reader.close();
     
-    it = readersToClose.iterator();
-    while (it.hasNext()) {
-      assertReaderClosed((IndexReader) it.next(), true, true);
+    for (final IndexReader readerToClose : readersToClose) {
+      assertReaderClosed(readerToClose, true, true);
     }
 
     assertReaderClosed(reader, true, true);
@@ -1185,9 +1184,9 @@
   }
 
   private static class KeepAllCommits implements IndexDeletionPolicy {
-    public void onInit(List commits) {
+    public void onInit(List<? extends IndexCommit> commits) {
     }
-    public void onCommit(List commits) {
+    public void onCommit(List<? extends IndexCommit> commits) {
     }
   }
 
@@ -1198,13 +1197,13 @@
       Document doc = new Document();
       doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
-      Map data = new HashMap();
+      Map<String,String> data = new HashMap<String,String>();
       data.put("index", i+"");
       writer.commit(data);
     }
     for(int i=0;i<4;i++) {
       writer.deleteDocuments(new Term("id", ""+i));
-      Map data = new HashMap();
+      Map<String,String> data = new HashMap<String,String>();
       data.put("index", (4+i)+"");
       writer.commit(data);
     }
@@ -1214,9 +1213,8 @@
     assertEquals(0, r.numDocs());
     assertEquals(4, r.maxDoc());
 
-    Iterator it = IndexReader.listCommits(dir).iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
       IndexReader r2 = r.reopen(commit);
       assertTrue(r2 != r);
 
@@ -1228,13 +1226,13 @@
         // expected
       }
 
-      final Map s = commit.getUserData();
+      final Map<String,String> s = commit.getUserData();
       final int v;
       if (s.size() == 0) {
         // First commit created by IW
         v = -1;
       } else {
-        v = Integer.parseInt((String) s.get("index"));
+        v = Integer.parseInt(s.get("index"));
       }
       if (v < 4) {
         assertEquals(1+v, r2.numDocs());
Index: src/test/org/apache/lucene/index/TestAtomicUpdate.java
===================================================================
--- src/test/org/apache/lucene/index/TestAtomicUpdate.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestAtomicUpdate.java	(working copy)
@@ -20,8 +20,6 @@
 import org.apache.lucene.store.*;
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.search.*;
-import org.apache.lucene.queryParser.*;
 
 import java.util.Random;
 import java.io.File;
Index: src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestParallelReader.java	(working copy)
@@ -71,7 +71,7 @@
     ParallelReader pr = new ParallelReader();
     pr.add(IndexReader.open(dir1, false));
     pr.add(IndexReader.open(dir2, false));
-    Collection fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
+    Collection<String> fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
     assertEquals(4, fieldNames.size());
     assertTrue(fieldNames.contains("f1"));
     assertTrue(fieldNames.contains("f2"));
Index: src/test/org/apache/lucene/index/TestDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/index/TestDeletionPolicy.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestDeletionPolicy.java	(working copy)
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.Collection;
@@ -43,14 +42,14 @@
 
 public class TestDeletionPolicy extends LuceneTestCase
 {
-  private void verifyCommitOrder(List commits) throws IOException {
-    final IndexCommit firstCommit = ((IndexCommit) commits.get(0));
+  private void verifyCommitOrder(List<? extends IndexCommit> commits) throws IOException {
+    final IndexCommit firstCommit = commits.get(0);
     long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName());
     assertEquals(last, firstCommit.getGeneration());
     long lastVersion = firstCommit.getVersion();
     long lastTimestamp = firstCommit.getTimestamp();
     for(int i=1;i<commits.size();i++) {
-      final IndexCommit commit = ((IndexCommit) commits.get(i));
+      final IndexCommit commit = commits.get(i);
       long now = SegmentInfos.generationFromSegmentsFileName(commit.getSegmentsFileName());
       long nowVersion = commit.getVersion();
       long nowTimestamp = commit.getTimestamp();
@@ -68,12 +67,12 @@
     int numOnInit;
     int numOnCommit;
     Directory dir;
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
     }
-    public void onCommit(List commits) throws IOException {
-      IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      IndexCommit lastCommit = commits.get(commits.size()-1);
       IndexReader r = IndexReader.open(dir, true);
       assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
       r.close();
@@ -89,18 +88,16 @@
   class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
     int numOnInit;
     int numOnCommit;
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
       // On init, delete all commit points:
-      Iterator it = commits.iterator();
-      while(it.hasNext()) {
-        final IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         commit.delete();
         assertTrue(commit.isDeleted());
       }
     }
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       int size = commits.size();
       // Delete all but last one:
@@ -116,25 +113,25 @@
     int numOnCommit;
     int numToKeep;
     int numDelete;
-    Set seen = new HashSet();
+    Set<String> seen = new HashSet<String>();
 
     public KeepLastNDeletionPolicy(int numToKeep) {
       this.numToKeep = numToKeep;
     }
 
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
       // do no deletions on init
       doDeletes(commits, false);
     }
 
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       doDeletes(commits, true);
     }
     
-    private void doDeletes(List commits, boolean isCommit) {
+    private void doDeletes(List<? extends IndexCommit> commits, boolean isCommit) {
 
       // Assert that we really are only called for each new
       // commit:
@@ -169,23 +166,21 @@
       this.expirationTimeSeconds = seconds;
     }
 
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       onCommit(commits);
     }
 
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
 
-      IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
+      IndexCommit lastCommit = commits.get(commits.size()-1);
 
       // Any commit older than expireTime should be deleted:
       double expireTime = dir.fileModified(lastCommit.getSegmentsFileName())/1000.0 - expirationTimeSeconds;
 
-      Iterator it = commits.iterator();
 
-      while(it.hasNext()) {
-        IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         double modTime = dir.fileModified(commit.getSegmentsFileName())/1000.0;
         if (commit != lastCommit && modTime < expireTime) {
           commit.delete();
@@ -297,14 +292,12 @@
       assertEquals(2, policy.numOnCommit);
 
       // Test listCommits
-      Collection commits = IndexReader.listCommits(dir);
+      Collection<IndexCommit> commits = IndexReader.listCommits(dir);
       // 1 from opening writer + 2 from closing writer
       assertEquals(3, commits.size());
 
-      Iterator it = commits.iterator();
       // Make sure we can open a reader on each commit:
-      while(it.hasNext()) {
-        IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         IndexReader r = IndexReader.open(commit, null, false);
         r.close();
       }
@@ -356,12 +349,10 @@
     }
     writer.close();
 
-    Collection commits = IndexReader.listCommits(dir);
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
     assertEquals(6, commits.size());
     IndexCommit lastCommit = null;
-    Iterator it = commits.iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
+    for (final IndexCommit commit : commits) {
       if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
         lastCommit = commit;
     }
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexReader.java	(working copy)
@@ -25,9 +25,11 @@
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.Set;
+import java.util.SortedSet;
 
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
@@ -72,7 +74,7 @@
     public void testCommitUserData() throws Exception {
       RAMDirectory d = new MockRAMDirectory();
 
-      Map commitUserData = new HashMap();
+      Map<String,String> commitUserData = new HashMap<String,String>();
       commitUserData.put("foo", "fighters");
       
       // set up writer
@@ -156,7 +158,7 @@
         writer.close();
         // set up reader
         IndexReader reader = IndexReader.open(d, false);
-        Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
+        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
         assertTrue(fieldNames.contains("keyword"));
         assertTrue(fieldNames.contains("text"));
         assertTrue(fieldNames.contains("unindexed"));
@@ -260,12 +262,12 @@
     IndexReader reader = IndexReader.open(d, false);
     FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
     reader.getTermFreqVector(0, mapper);
-    Map map = mapper.getFieldToTerms();
+    Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
     assertTrue("map is null and it shouldn't be", map != null);
     assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
-    Set set = (Set) map.get("termvector");
-    for (Iterator iterator = set.iterator(); iterator.hasNext();) {
-      TermVectorEntry entry = (TermVectorEntry) iterator.next();
+    Set<TermVectorEntry> set = map.get("termvector");
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry entry = iterator.next();
       assertTrue("entry is null and it shouldn't be", entry != null);
       System.out.println("Entry: " + entry);
     }
@@ -380,9 +382,9 @@
         for (int i = 0; i < bin.length; i++) {
           assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
         }
-        Set lazyFields = new HashSet();
+        Set<String> lazyFields = new HashSet<String>();
         lazyFields.add("bin1");
-        FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields);
+        FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields);
         doc = reader.document(reader.maxDoc() - 1, sel);
         Fieldable[] fieldables = doc.getFieldables("bin1");
         assertNotNull(fieldables);
@@ -1340,19 +1342,19 @@
       assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());
       
       // check field names
-      Collection fields1 = index1.getFieldNames(FieldOption.ALL);
-      Collection fields2 = index1.getFieldNames(FieldOption.ALL);
+      Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
+      Collection<String> fields2 = index1.getFieldNames(FieldOption.ALL);
       assertEquals("IndexReaders have different numbers of fields.", fields1.size(), fields2.size());
-      Iterator it1 = fields1.iterator();
-      Iterator it2 = fields1.iterator();
+      Iterator<String> it1 = fields1.iterator();
+      Iterator<String> it2 = fields1.iterator();
       while (it1.hasNext()) {
-        assertEquals("Different field names.", (String) it1.next(), (String) it2.next());
+        assertEquals("Different field names.", it1.next(), it2.next());
       }
       
       // check norms
       it1 = fields1.iterator();
       while (it1.hasNext()) {
-        String curField = (String) it1.next();
+        String curField = it1.next();
         byte[] norms1 = index1.norms(curField);
         byte[] norms2 = index2.norms(curField);
         if (norms1 != null && norms2 != null)
@@ -1378,14 +1380,14 @@
         if (!index1.isDeleted(i)) {
           Document doc1 = index1.document(i);
           Document doc2 = index2.document(i);
-          fields1 = doc1.getFields();
-          fields2 = doc2.getFields();
-          assertEquals("Different numbers of fields for doc " + i + ".", fields1.size(), fields2.size());
-          it1 = fields1.iterator();
-          it2 = fields2.iterator();
-          while (it1.hasNext()) {
-            Field curField1 = (Field) it1.next();
-            Field curField2 = (Field) it2.next();
+          List<Fieldable> fieldable1 = doc1.getFields();
+          List<Fieldable> fieldable2 = doc2.getFields();
+          assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
+          Iterator<Fieldable> itField1 = fieldable1.iterator();
+          Iterator<Fieldable> itField2 = fieldable2.iterator();
+          while (itField1.hasNext()) {
+            Field curField1 = (Field) itField1.next();
+            Field curField2 = (Field) itField2.next();
             assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
             assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
           }          
@@ -1587,15 +1589,11 @@
     writer.addDocument(createDocument("a"));
     writer.close();
     
-    Collection commits = IndexReader.listCommits(dir);
-    Iterator it = commits.iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
-      Collection files = commit.getFileNames();
-      HashSet seen = new HashSet();
-      Iterator it2 = files.iterator();
-      while(it2.hasNext()) {
-        String fileName = (String) it2.next();
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
+      Collection<String> files = commit.getFileNames();
+      HashSet<String> seen = new HashSet<String>();
+      for (final String fileName : files) {
         assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName));
         seen.add(fileName);
       }
Index: src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- src/test/org/apache/lucene/index/TestDoc.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestDoc.java	(working copy)
@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
-import java.util.Iterator;
+
 import java.util.LinkedList;
 import java.util.List;
 
@@ -48,7 +48,7 @@
 
     private File workDir;
     private File indexDir;
-    private LinkedList files;
+    private LinkedList<File> files;
 
 
     /** Set the test case. This test case needs
@@ -66,7 +66,7 @@
         Directory directory = FSDirectory.open(indexDir);
         directory.close();
 
-        files = new LinkedList();
+        files = new LinkedList<File>();
         files.add(createOutput("test.txt",
             "This is the first test file"
         ));
@@ -188,9 +188,9 @@
       merger.closeReaders();
       
       if (useCompoundFile) {
-        List filesToDelete = merger.createCompoundFile(merged + ".cfs");
-        for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
-          si1.dir.deleteFile((String) iter.next());
+        List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs");
+        for (final String fileToDelete : filesToDelete)
+          si1.dir.deleteFile(fileToDelete);
       }
 
       return new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, useCompoundFile, true);
Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java	(working copy)
@@ -73,7 +73,7 @@
     // dir1 = FSDirectory.open("foofoofoo");
     Directory dir2 = new MockRAMDirectory();
     // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
-    Map docs = indexRandom(10, 100, 100, dir1);
+    Map<String,Document> docs = indexRandom(10, 100, 100, dir1);
     indexSerial(docs, dir2);
 
     // verifying verify
@@ -97,7 +97,7 @@
       int range=r.nextInt(20)+1;
       Directory dir1 = new MockRAMDirectory();
       Directory dir2 = new MockRAMDirectory();
-      Map docs = indexRandom(nThreads, iter, range, dir1);
+      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1);
       indexSerial(docs, dir2);
       verifyEquals(dir1, dir2, "id");
     }
@@ -106,9 +106,9 @@
 
   static Term idTerm = new Term("id","");
   IndexingThread[] threads;
-  static Comparator fieldNameComparator = new Comparator() {
-        public int compare(Object o1, Object o2) {
-          return ((Fieldable)o1).name().compareTo(((Fieldable)o2).name());
+  static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
+        public int compare(Fieldable o1, Fieldable o2) {
+          return o1.name().compareTo(o2.name());
         }
   };
 
@@ -117,12 +117,12 @@
   // everything.
   
   public static class DocsAndWriter {
-    Map docs;
+    Map<String,Document> docs;
     IndexWriter writer;
   }
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map docs = new HashMap();
+    Map<String,Document> docs = new HashMap<String,Document>();
     IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
 
@@ -172,8 +172,8 @@
     return dw;
   }
   
-  public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map docs = new HashMap();
+  public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
+    Map<String,Document> docs = new HashMap<String,Document>();
     for(int iter=0;iter<3;iter++) {
       IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false);
@@ -217,14 +217,12 @@
   }
 
   
-  public static void indexSerial(Map docs, Directory dir) throws IOException {
+  public static void indexSerial(Map<String,Document> docs, Directory dir) throws IOException {
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
 
     // index all docs in a single thread
-    Iterator iter = docs.values().iterator();
-    while (iter.hasNext()) {
-      Document d = (Document)iter.next();
-      ArrayList fields = new ArrayList();
+    for (final Document d : docs.values()) {
+      ArrayList<Fieldable> fields = new ArrayList<Fieldable>();
       fields.addAll(d.getFields());
       // put fields in same order each time
       Collections.sort(fields, fieldNameComparator);
@@ -232,7 +230,7 @@
       Document d1 = new Document();
       d1.setBoost(d.getBoost());
       for (int i=0; i<fields.size(); i++) {
-        d1.add((Fieldable) fields.get(i));
+        d1.add(fields.get(i));
       }
       w.addDocument(d1);
       // System.out.println("indexing "+d1);
@@ -391,8 +389,8 @@
   }
 
   public static void verifyEquals(Document d1, Document d2) {
-    List ff1 = d1.getFields();
-    List ff2 = d2.getFields();
+    List<Fieldable> ff1 = d1.getFields();
+    List<Fieldable> ff2 = d2.getFields();
 
     Collections.sort(ff1, fieldNameComparator);
     Collections.sort(ff2, fieldNameComparator);
@@ -405,8 +403,8 @@
 
 
     for (int i=0; i<ff1.size(); i++) {
-      Fieldable f1 = (Fieldable)ff1.get(i);
-      Fieldable f2 = (Fieldable)ff2.get(i);
+      Fieldable f1 = ff1.get(i);
+      Fieldable f2 = ff2.get(i);
       if (f1.isBinary()) {
         assert(f2.isBinary());
         //TODO
@@ -480,7 +478,7 @@
     int base;
     int range;
     int iterations;
-    Map docs = new HashMap();  // Map<String,Document>
+    Map<String,Document> docs = new HashMap<String,Document>();  
     Random r;
 
     public int nextInt(int lim) {
@@ -561,7 +559,7 @@
     public void indexDoc() throws IOException {
       Document d = new Document();
 
-      ArrayList fields = new ArrayList();      
+      ArrayList<Field> fields = new ArrayList<Field>();      
       String idString = getIdString();
       Field idField =  new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
       fields.add(idField);
@@ -609,7 +607,7 @@
       }
 
       for (int i=0; i<fields.size(); i++) {
-        d.add((Fieldable) fields.get(i));
+        d.add(fields.get(i));
       }
       w.updateDocument(idTerm.createTerm(idString), d);
       // System.out.println("indexing "+d);
Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexWriterReader.java	(working copy)
@@ -305,10 +305,10 @@
     final static int NUM_THREADS = 5;
     final Thread[] threads = new Thread[NUM_THREADS];
     IndexWriter mainWriter;
-    List deletedTerms = new ArrayList();
-    LinkedList toDeleteTerms = new LinkedList();
+    List<Term> deletedTerms = new ArrayList<Term>();
+    LinkedList<Term> toDeleteTerms = new LinkedList<Term>();
     Random random;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     
     public DeleteThreads(IndexWriter mainWriter) throws IOException {
       this.mainWriter = mainWriter;
@@ -325,7 +325,7 @@
     
     Term getDeleteTerm() {
       synchronized (toDeleteTerms) {
-        return (Term)toDeleteTerms.removeFirst();
+        return toDeleteTerms.removeFirst();
       }
     }
     
@@ -372,7 +372,7 @@
     int numDirs;
     final Thread[] threads = new Thread[NUM_THREADS];
     IndexWriter mainWriter;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     IndexReader[] readers;
     boolean didClose = false;
     HeavyAtomicInt count = new HeavyAtomicInt(0);
@@ -722,7 +722,7 @@
     final float SECONDS = 3;
 
     final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
-    final List excs = Collections.synchronizedList(new ArrayList());
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
 
     final Thread[] threads = new Thread[NUM_THREAD];
     for(int i=0;i<NUM_THREAD;i++) {
Index: src/test/org/apache/lucene/index/TestLazyBug.java
===================================================================
--- src/test/org/apache/lucene/index/TestLazyBug.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestLazyBug.java	(working copy)
@@ -46,7 +46,7 @@
     "this string is a bigger string, mary had a little lamb, little lamb, little lamb!"
   };
 
-  private static Set dataset = new HashSet(Arrays.asList(data));
+  private static Set<String> dataset = new HashSet<String>(Arrays.asList(data));
   
   private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
   
@@ -93,11 +93,11 @@
       Document d = reader.document(docs[i], SELECTOR);
       d.get(MAGIC_FIELD);
       
-      List fields = d.getFields();
-      for (Iterator fi = fields.iterator(); fi.hasNext(); ) {
+      List<Fieldable> fields = d.getFields();
+      for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
         Fieldable f=null;
         try {
-          f = (Fieldable) fi.next();
+          f = fi.next();
           String fname = f.name();
           String fval = f.stringValue();
           assertNotNull(docs[i]+" FIELD: "+fname, fval);
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(working copy)
@@ -55,9 +55,9 @@
 
   private int numDocNorms;
 
-  private ArrayList norms;
+  private ArrayList<Float> norms;
 
-  private ArrayList modifiedNorms;
+  private ArrayList<Float> modifiedNorms;
 
   private float lastNorm = 0;
 
@@ -91,19 +91,19 @@
     Directory dir1 = FSDirectory.open(indexDir1);
     IndexWriter.unlock(dir1);
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
 
     createIndex(dir1);
     doTestNorms(dir1);
 
     // test with a single index: index2
-    ArrayList norms1 = norms;
-    ArrayList modifiedNorms1 = modifiedNorms;
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
     int numDocNorms1 = numDocNorms;
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
     numDocNorms = 0;
 
     File indexDir2 = new File(tempDir, "lucenetestindex2");
@@ -255,8 +255,8 @@
     // System.out.println("modifyNormsForF1 maxDoc: "+n);
     for (int i = 0; i < n; i += 3) { // modify for every third doc
       int k = (i * 3) % modifiedNorms.size();
-      float origNorm = ((Float) modifiedNorms.get(i)).floatValue();
-      float newNorm = ((Float) modifiedNorms.get(k)).floatValue();
+      float origNorm = modifiedNorms.get(i).floatValue();
+      float newNorm = modifiedNorms.get(k).floatValue();
       // System.out.println("Modifying: for "+i+" from "+origNorm+" to
       // "+newNorm);
       // System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
@@ -281,10 +281,10 @@
       String field = "f" + i;
       byte b[] = ir.norms(field);
       assertEquals("number of norms mismatches", numDocNorms, b.length);
-      ArrayList storedNorms = (i == 1 ? modifiedNorms : norms);
+      ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
       for (int j = 0; j < b.length; j++) {
         float norm = Similarity.decodeNorm(b[j]);
-        float norm1 = ((Float) storedNorms.get(j)).floatValue();
+        float norm1 = storedNorms.get(j).floatValue();
         assertEquals("stored norm value of " + field + " for doc " + j + " is "
             + norm + " - a mismatch!", norm, norm1, 0.000001);
       }
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -2129,7 +2129,7 @@
         writer.setMergeFactor(2);
 
         final IndexWriter finalWriter = writer;
-        final ArrayList failure = new ArrayList();
+        final ArrayList<Throwable> failure = new ArrayList<Throwable>();
         Thread t1 = new Thread() {
             @Override
             public void run() {
@@ -2158,7 +2158,7 @@
           };
 
         if (failure.size() > 0)
-          throw (Throwable) failure.get(0);
+          throw failure.get(0);
 
         t1.start();
 
@@ -3474,14 +3474,14 @@
       final TermAttribute termAtt = addAttribute(TermAttribute.class);
       final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
       
-      final Iterator tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
+      final Iterator<String> tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
       boolean first = true;
       
       @Override
       public boolean incrementToken() {
         if (!tokens.hasNext()) return false;
         clearAttributes();
-        termAtt.setTermBuffer((String) tokens.next());
+        termAtt.setTermBuffer(tokens.next());
         posIncrAtt.setPositionIncrement(first ? 0 : 1);
         first = false;
         return true;
@@ -3642,7 +3642,7 @@
     Directory dir, dir2;
     final static int NUM_INIT_DOCS = 17;
     IndexWriter writer2;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     volatile boolean didClose;
     final IndexReader[] readers;
     final int NUM_COPY;
@@ -3991,7 +3991,7 @@
     w.setMaxBufferedDocs(2);
     for(int j=0;j<17;j++)
       addDoc(w);
-    Map data = new HashMap();
+    Map<String,String> data = new HashMap<String,String>();
     data.put("label", "test1");
     w.commit(data);
     w.close();
@@ -4039,7 +4039,7 @@
   // LUCENE-1429
   public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
 
-    final List thrown = new ArrayList();
+    final List<Throwable> thrown = new ArrayList<Throwable>();
 
     final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
         @Override
Index: src/test/org/apache/lucene/index/TestIndexFileDeleter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexFileDeleter.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexFileDeleter.java	(working copy)
@@ -28,7 +28,6 @@
 import org.apache.lucene.document.Field;
 import java.io.*;
 import java.util.*;
-import java.util.zip.*;
 
 /*
   Verify we can read the pre-2.1 file format, do searches
@@ -155,33 +154,34 @@
     Arrays.sort(files);
     Arrays.sort(files2);
     
-    Set dif = difFiles(files, files2);
+    Set<String> dif = difFiles(files, files2);
     
     if (!Arrays.equals(files, files2)) {
       fail("IndexFileDeleter failed to delete unreferenced extra files: should have deleted " + (filesPre.length-files.length) + " files but only deleted " + (filesPre.length - files2.length) + "; expected files:\n    " + asString(files) + "\n  actual files:\n    " + asString(files2)+"\ndif: "+dif);
     }
   }
 
-  private static Set difFiles(String[] files1, String[] files2) {
-    Set set1 = new HashSet();
-    Set set2 = new HashSet();
-    Set extra = new HashSet();
+  private static Set<String> difFiles(String[] files1, String[] files2) {
+    Set<String> set1 = new HashSet<String>();
+    Set<String> set2 = new HashSet<String>();
+    Set<String> extra = new HashSet<String>();
+    
     for (int x=0; x < files1.length; x++) {
       set1.add(files1[x]);
     }
     for (int x=0; x < files2.length; x++) {
       set2.add(files2[x]);
     }
-    Iterator i1 = set1.iterator();
+    Iterator<String> i1 = set1.iterator();
     while (i1.hasNext()) {
-      Object o = i1.next();
+      String o = i1.next();
       if (!set2.contains(o)) {
         extra.add(o);
       }
     }
-    Iterator i2 = set2.iterator();
+    Iterator<String> i2 = set2.iterator();
     while (i2.hasNext()) {
-      Object o = i2.next();
+      String o = i2.next();
       if (!set1.contains(o)) {
         extra.add(o);
       }
Index: src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- src/test/org/apache/lucene/index/TestPayloads.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestPayloads.java	(working copy)
@@ -382,7 +382,7 @@
      * This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
      */
     private static class PayloadAnalyzer extends Analyzer {
-        Map fieldToData = new HashMap();
+        Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
         
         void setPayloadData(String field, byte[] data, int offset, int length) {
             fieldToData.put(field, new PayloadData(0, data, offset, length));
@@ -394,7 +394,7 @@
         
         @Override
         public TokenStream tokenStream(String fieldName, Reader reader) {
-            PayloadData payload = (PayloadData) fieldToData.get(fieldName);
+            PayloadData payload = fieldToData.get(fieldName);
             TokenStream ts = new WhitespaceTokenizer(reader);
             if (payload != null) {
                 if (payload.numFieldInstancesToSkip == 0) {
@@ -550,10 +550,10 @@
     }
     
     private static class ByteArrayPool {
-        private List pool;
+        private List<byte[]> pool;
         
         ByteArrayPool(int capacity, int size) {
-            pool = new ArrayList();
+            pool = new ArrayList<byte[]>();
             for (int i = 0; i < capacity; i++) {
                 pool.add(new byte[size]);
             }
@@ -572,7 +572,7 @@
         }
     
         synchronized byte[] get() {
-            return (byte[]) pool.remove(0);
+            return pool.remove(0);
         }
         
         synchronized void release(byte[] b) {
Index: src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestFieldsReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestFieldsReader.java	(working copy)
@@ -100,10 +100,10 @@
     FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
     assertTrue(reader != null);
     assertTrue(reader.size() == 1);
-    Set loadFieldNames = new HashSet();
+    Set<String> loadFieldNames = new HashSet<String>();
     loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
     loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     //new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY};
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
@@ -150,10 +150,10 @@
     FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
     assertTrue(reader != null);
     assertTrue(reader.size() == 1);
-    Set loadFieldNames = new HashSet();
+    Set<String> loadFieldNames = new HashSet<String>();
     loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
     loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
@@ -183,9 +183,10 @@
     Document doc = reader.doc(0, fieldSelector);
     assertTrue("doc is null and it shouldn't be", doc != null);
     int count = 0;
-    List l = doc.getFields();
-    for (Iterator iter = l.iterator(); iter.hasNext();) {
-      Field field = (Field) iter.next();
+    List<Fieldable> l = doc.getFields();
+    for (final Fieldable fieldable : l) {
+      Field field = (Field) fieldable;
+
       assertTrue("field is null and it shouldn't be", field != null);
       String sv = field.stringValue();
       assertTrue("sv is null and it shouldn't be", sv != null);
@@ -220,9 +221,9 @@
     long lazyTime = 0;
     long regularTime = 0;
     int length = 50;
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
-    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections.EMPTY_SET, lazyFieldNames);
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections.<String>emptySet(), lazyFieldNames);
 
     for (int i = 0; i < length; i++) {
       reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
Index: src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentMerger.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestSegmentMerger.java	(working copy)
@@ -85,7 +85,7 @@
     assertTrue(termDocs != null);
     assertTrue(termDocs.next() == true);
     
-    Collection stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
+    Collection<String> stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
     assertTrue(stored != null);
     //System.out.println("stored size: " + stored.size());
     assertTrue("We do not have 3 fields that were indexed with term vector",stored.size() == 3);
Index: src/test/org/apache/lucene/index/TestTermVectorsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestTermVectorsReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestTermVectorsReader.java	(working copy)
@@ -25,7 +25,6 @@
 import java.util.SortedSet;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -258,13 +257,13 @@
     assertTrue(reader != null);
     SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
     reader.get(0, mapper);
-    SortedSet set = mapper.getTermVectorEntrySet();
+    SortedSet<TermVectorEntry> set = mapper.getTermVectorEntrySet();
     assertTrue("set is null and it shouldn't be", set != null);
     //three fields, 4 terms, all terms are the same
     assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
     //Check offsets and positions
-    for (Iterator iterator = set.iterator(); iterator.hasNext();) {
-      TermVectorEntry tve = (TermVectorEntry) iterator.next();
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry tve = iterator.next();
       assertTrue("tve is null and it shouldn't be", tve != null);
       assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
       assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
@@ -278,8 +277,8 @@
     //three fields, 4 terms, all terms are the same
     assertTrue("set Size: " + set.size() + " is not: " + 4, set.size() == 4);
     //Should have offsets and positions b/c we are munging all the fields together
-    for (Iterator iterator = set.iterator(); iterator.hasNext();) {
-      TermVectorEntry tve = (TermVectorEntry) iterator.next();
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry tve = iterator.next();
       assertTrue("tve is null and it shouldn't be", tve != null);
       assertTrue("tve.getOffsets() is null and it shouldn't be", tve.getOffsets() != null);
       assertTrue("tve.getPositions() is null and it shouldn't be", tve.getPositions() != null);
@@ -289,14 +288,12 @@
 
     FieldSortedTermVectorMapper fsMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
     reader.get(0, fsMapper);
-    Map map = fsMapper.getFieldToTerms();
+    Map<String,SortedSet<TermVectorEntry>> map = fsMapper.getFieldToTerms();
     assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
-    for (Iterator iterator = map.entrySet().iterator(); iterator.hasNext();) {
-      Map.Entry entry = (Map.Entry) iterator.next();
-      SortedSet sortedSet = (SortedSet) entry.getValue();
+    for (Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
+      SortedSet<TermVectorEntry> sortedSet = entry.getValue();
       assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
-      for (Iterator inner = sortedSet.iterator(); inner.hasNext();) {
-        TermVectorEntry tve = (TermVectorEntry) inner.next();
+      for (final TermVectorEntry tve : sortedSet) {
         assertTrue("tve is null and it shouldn't be", tve != null);
         //Check offsets and positions.
         assertTrue("tve is null and it shouldn't be", tve != null);
@@ -320,12 +317,10 @@
     reader.get(0, fsMapper);
     map = fsMapper.getFieldToTerms();
     assertTrue("map Size: " + map.size() + " is not: " + testFields.length, map.size() == testFields.length);
-    for (Iterator iterator = map.entrySet().iterator(); iterator.hasNext();) {
-      Map.Entry entry = (Map.Entry) iterator.next();
-      SortedSet sortedSet = (SortedSet) entry.getValue();
+    for (final Map.Entry<String,SortedSet<TermVectorEntry>> entry : map.entrySet()) {
+      SortedSet<TermVectorEntry> sortedSet = entry.getValue();
       assertTrue("sortedSet Size: " + sortedSet.size() + " is not: " + 4, sortedSet.size() == 4);
-      for (Iterator inner = sortedSet.iterator(); inner.hasNext();) {
-        TermVectorEntry tve = (TermVectorEntry) inner.next();
+      for (final TermVectorEntry tve : sortedSet) {
         assertTrue("tve is null and it shouldn't be", tve != null);
         //Check offsets and positions.
         assertTrue("tve is null and it shouldn't be", tve != null);
Index: src/test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderClone.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexReaderClone.java	(working copy)
@@ -17,8 +17,6 @@
  * limitations under the License.
  */
 
-import java.io.File;
-import java.io.IOException;
 
 import org.apache.lucene.index.SegmentReader.Norm;
 import org.apache.lucene.search.Similarity;
@@ -28,9 +26,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.store.AlreadyClosedException;
 
 /**
  * Tests cloning multiple types of readers, modifying the deletedDocs and norms
Index: src/test/org/apache/lucene/index/TestTransactions.java
===================================================================
--- src/test/org/apache/lucene/index/TestTransactions.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestTransactions.java	(working copy)
@@ -217,6 +217,6 @@
       threads[i].join();
 
     for(int i=0;i<numThread;i++)
-      assertTrue(!((TimedThread) threads[i]).failed);
+      assertTrue(!threads[i].failed);
   }
 }
Index: src/test/org/apache/lucene/index/TestStressIndexing.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestStressIndexing.java	(working copy)
@@ -21,7 +21,6 @@
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.search.*;
-import org.apache.lucene.queryParser.*;
 
 import java.util.Random;
 import java.io.File;
@@ -155,7 +154,7 @@
     modifier.close();
 
     for(int i=0;i<numThread;i++)
-      assertTrue(!((TimedThread) threads[i]).failed);
+      assertTrue(! threads[i].failed);
 
     //System.out.println("    Writer: " + indexerThread.count + " iterations");
     //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");
Index: src/test/org/apache/lucene/index/TestWordlistLoader.java
===================================================================
--- src/test/org/apache/lucene/index/TestWordlistLoader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestWordlistLoader.java	(working copy)
@@ -30,22 +30,22 @@
 
   public void testWordlistLoading() throws IOException {
     String s = "ONE\n  two \nthree";
-    HashSet wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
+    HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s));
     checkSet(wordSet1);
-    HashSet wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
+    HashSet<String> wordSet2 = WordlistLoader.getWordSet(new BufferedReader(new StringReader(s)));
     checkSet(wordSet2);
   }
 
   public void testComments() throws Exception {
     String s = "ONE\n  two \nthree\n#comment";
-    HashSet wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
+    HashSet<String> wordSet1 = WordlistLoader.getWordSet(new StringReader(s), "#");
     checkSet(wordSet1);
     assertFalse(wordSet1.contains("#comment"));
     assertFalse(wordSet1.contains("comment"));
   }
 
 
-  private void checkSet(HashSet wordset) {
+  private void checkSet(HashSet<String> wordset) {
     assertEquals(3, wordset.size());
     assertTrue(wordset.contains("ONE"));		// case is not modified
     assertTrue(wordset.contains("two"));		// surrounding whitespace is removed
Index: src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterExceptions.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestIndexWriterExceptions.java	(working copy)
@@ -109,7 +109,7 @@
     }
   }
 
-  ThreadLocal doFail = new ThreadLocal();
+  ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();
 
   public class MockIndexWriter extends IndexWriter {
     Random r = new java.util.Random(17);
Index: src/test/org/apache/lucene/index/TestCheckIndex.java
===================================================================
--- src/test/org/apache/lucene/index/TestCheckIndex.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestCheckIndex.java	(working copy)
@@ -58,7 +58,7 @@
       fail();
     }
     
-    final CheckIndex.Status.SegmentInfoStatus seg = (CheckIndex.Status.SegmentInfoStatus) indexStatus.segmentInfos.get(0);
+    final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0);
     assertTrue(seg.openReaderPassed);
 
     assertNotNull(seg.diagnostics);
@@ -84,7 +84,7 @@
     assertEquals(18, seg.termVectorStatus.totVectors);
 
     assertTrue(seg.diagnostics.size() > 0);
-    final List onlySegments = new ArrayList();
+    final List<String> onlySegments = new ArrayList<String>();
     onlySegments.add("_0");
     
     assertTrue(checker.checkIndex(onlySegments).clean == true);
Index: src/test/org/apache/lucene/index/TestSegmentReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentReader.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestSegmentReader.java	(working copy)
@@ -62,9 +62,8 @@
     //There are 2 unstored fields on the document that are not preserved across writing
     assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
     
-    List fields = result.getFields();
-    for (Iterator iter = fields.iterator(); iter.hasNext();) {
-      Fieldable field = (Fieldable) iter.next();
+    List<Fieldable> fields = result.getFields();
+    for (final Fieldable field : fields) {
       assertTrue(field != null);
       assertTrue(DocHelper.nameValues.containsKey(field.name()));
     }
@@ -84,19 +83,19 @@
   }    
   
   public void testGetFieldNameVariations() {
-    Collection result = reader.getFieldNames(IndexReader.FieldOption.ALL);
+    Collection<String> result = reader.getFieldNames(IndexReader.FieldOption.ALL);
     assertTrue(result != null);
     assertTrue(result.size() == DocHelper.all.size());
-    for (Iterator iter = result.iterator(); iter.hasNext();) {
-      String s = (String) iter.next();
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s = iter.next();
       //System.out.println("Name: " + s);
       assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
     }                                                                               
     result = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
     assertTrue(result != null);
     assertTrue(result.size() == DocHelper.indexed.size());
-    for (Iterator iter = result.iterator(); iter.hasNext();) {
-      String s = (String) iter.next();
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s = iter.next();
       assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
     }
     
Index: src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java
===================================================================
--- src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java	(working copy)
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.BitSet;
-import java.util.Iterator;
 import java.util.Map;
 
 public class TestPositionBasedTermVectorMapper extends LuceneTestCase {
@@ -69,19 +68,19 @@
       mapper.map(token, 1, null, thePositions[i]);
 
     }
-    Map map = mapper.getFieldToTerms();
+    Map<String,Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo>> map = mapper.getFieldToTerms();
     assertTrue("map is null and it shouldn't be", map != null);
     assertTrue("map Size: " + map.size() + " is not: " + 1, map.size() == 1);
-    Map positions = (Map) map.get("test");
+    Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo> positions = map.get("test");
     assertTrue("thePositions is null and it shouldn't be", positions != null);
     
     assertTrue("thePositions Size: " + positions.size() + " is not: " + numPositions, positions.size() == numPositions);
     BitSet bits = new BitSet(numPositions);
-    for (Iterator iterator = positions.entrySet().iterator(); iterator.hasNext();) {
-      Map.Entry entry = (Map.Entry) iterator.next();
-      PositionBasedTermVectorMapper.TVPositionInfo info = (PositionBasedTermVectorMapper.TVPositionInfo) entry.getValue();
+    for (Map.Entry<Integer,PositionBasedTermVectorMapper.TVPositionInfo> entry : positions.entrySet()) {
+    
+      PositionBasedTermVectorMapper.TVPositionInfo info = entry.getValue();
       assertTrue("info is null and it shouldn't be", info != null);
-      int pos = ((Integer) entry.getKey()).intValue();
+      int pos = entry.getKey().intValue();
       bits.set(pos);
       assertTrue(info.getPosition() + " does not equal: " + pos, info.getPosition() == pos);
       assertTrue("info.getOffsets() is null and it shouldn't be", info.getOffsets() != null);
Index: src/test/org/apache/lucene/index/TestByteSlices.java
===================================================================
--- src/test/org/apache/lucene/index/TestByteSlices.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestByteSlices.java	(working copy)
@@ -21,7 +21,7 @@
 public class TestByteSlices extends LuceneTestCase {
 
   private static class ByteBlockAllocator extends ByteBlockPool.Allocator {
-    ArrayList freeByteBlocks = new ArrayList();
+    ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
     
     /* Allocate another byte[] from the shared pool */
     @Override
@@ -31,7 +31,7 @@
       if (0 == size)
         b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
       else
-        b = (byte[]) freeByteBlocks.remove(size-1);
+        b = freeByteBlocks.remove(size-1);
       return b;
     }
 
Index: src/test/org/apache/lucene/index/TestNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestNorms.java	(revision 836056)
+++ src/test/org/apache/lucene/index/TestNorms.java	(working copy)
@@ -52,8 +52,8 @@
   private Similarity similarityOne;
   private Analyzer anlzr;
   private int numDocNorms;
-  private ArrayList norms; 
-  private ArrayList modifiedNorms; 
+  private ArrayList<Float> norms; 
+  private ArrayList<Float> modifiedNorms; 
   private float lastNorm = 0;
   private float normDelta = (float) 0.001;
 
@@ -85,19 +85,19 @@
     File indexDir1 = new File(tempDir, "lucenetestindex1");
     Directory dir1 = FSDirectory.open(indexDir1);
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
 
     createIndex(dir1);
     doTestNorms(dir1);
 
     // test with a single index: index2
-    ArrayList norms1 = norms;
-    ArrayList modifiedNorms1 = modifiedNorms;
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
     int numDocNorms1 = numDocNorms;
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
     numDocNorms = 0;
     
     File indexDir2 = new File(tempDir, "lucenetestindex2");
@@ -168,8 +168,8 @@
     int n = ir.maxDoc();
     for (int i = 0; i < n; i+=3) { // modify for every third doc
       int k = (i*3) % modifiedNorms.size();
-      float origNorm = ((Float)modifiedNorms.get(i)).floatValue();
-      float newNorm = ((Float)modifiedNorms.get(k)).floatValue();
+      float origNorm = modifiedNorms.get(i).floatValue();
+      float newNorm = modifiedNorms.get(k).floatValue();
       //System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
       //System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
       modifiedNorms.set(i, Float.valueOf(newNorm));
@@ -187,10 +187,10 @@
       String field = "f"+i;
       byte b[] = ir.norms(field);
       assertEquals("number of norms mismatches",numDocNorms,b.length);
-      ArrayList storedNorms = (i==1 ? modifiedNorms : norms);
+      ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
       for (int j = 0; j < b.length; j++) {
         float norm = Similarity.decodeNorm(b[j]);
-        float norm1 = ((Float)storedNorms.get(j)).floatValue();
+        float norm1 = storedNorms.get(j).floatValue();
         assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
       }
     }
Index: src/test/org/apache/lucene/util/TestNumericUtils.java
===================================================================
--- src/test/org/apache/lucene/util/TestNumericUtils.java	(revision 836056)
+++ src/test/org/apache/lucene/util/TestNumericUtils.java	(working copy)
@@ -174,7 +174,7 @@
   
   /** Note: The neededBounds iterator must be unsigned (easier understanding what's happening) */
   protected void assertLongRangeSplit(final long lower, final long upper, int precisionStep,
-    final boolean useBitSet, final Iterator neededBounds
+    final boolean useBitSet, final Iterator<Long> neededBounds
   ) throws Exception {
     final OpenBitSet bits=useBitSet ? new OpenBitSet(upper-lower+1) : null;
     
@@ -189,8 +189,8 @@
         min ^= 0x8000000000000000L;
         max ^= 0x8000000000000000L;
         //System.out.println("Long.valueOf(0x"+Long.toHexString(min>>>shift)+"L),Long.valueOf(0x"+Long.toHexString(max>>>shift)+"L),");
-        assertEquals( "inner min bound", ((Long)neededBounds.next()).longValue(), min>>>shift);
-        assertEquals( "inner max bound", ((Long)neededBounds.next()).longValue(), max>>>shift);
+        assertEquals( "inner min bound", neededBounds.next().longValue(), min>>>shift);
+        assertEquals( "inner max bound", neededBounds.next().longValue(), max>>>shift);
       }
     }, precisionStep, lower, upper);
     
Index: src/test/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCase.java	(revision 836056)
+++ src/test/org/apache/lucene/util/LuceneTestCase.java	(working copy)
@@ -24,7 +24,6 @@
 
 import junit.framework.TestCase;
 
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;
Index: src/test/org/apache/lucene/util/LocalizedTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LocalizedTestCase.java	(revision 836056)
+++ src/test/org/apache/lucene/util/LocalizedTestCase.java	(working copy)
@@ -43,7 +43,7 @@
   /**
    * An optional limited set of testcases that will run under different Locales.
    */
-  private final Set testWithDifferentLocales;
+  private final Set<String> testWithDifferentLocales;
 
   public LocalizedTestCase() {
     super();
@@ -55,12 +55,12 @@
     testWithDifferentLocales = null;
   }
 
-  public LocalizedTestCase(Set testWithDifferentLocales) {
+  public LocalizedTestCase(Set<String> testWithDifferentLocales) {
     super();
     this.testWithDifferentLocales = testWithDifferentLocales;
   }
 
-  public LocalizedTestCase(String name, Set testWithDifferentLocales) {
+  public LocalizedTestCase(String name, Set<String> testWithDifferentLocales) {
     super(name);
     this.testWithDifferentLocales = testWithDifferentLocales;
   }
Index: src/test/org/apache/lucene/util/cache/TestSimpleLRUCache.java
===================================================================
--- src/test/org/apache/lucene/util/cache/TestSimpleLRUCache.java	(revision 836056)
+++ src/test/org/apache/lucene/util/cache/TestSimpleLRUCache.java	(working copy)
@@ -25,7 +25,7 @@
     final int n = 100;
     Object dummy = new Object();
     
-    Cache cache = new SimpleLRUCache(n);
+    Cache<Integer,Object> cache = new SimpleLRUCache<Integer,Object>(n);
     
     for (int i = 0; i < n; i++) {
       cache.put(Integer.valueOf(i), dummy);
Index: src/java/org/apache/lucene/search/CachingWrapperFilter.java
===================================================================
--- src/java/org/apache/lucene/search/CachingWrapperFilter.java	(revision 836056)
+++ src/java/org/apache/lucene/search/CachingWrapperFilter.java	(working copy)
@@ -35,7 +35,7 @@
   /**
    * A transient Filter cache (package private because of test)
    */
-  transient Map<IndexReader, DocIdSet> cache;
+  transient Map<IndexReader,DocIdSet> cache;
   
   private final ReentrantLock lock = new ReentrantLock();
 
Index: src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java
===================================================================
--- src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java	(revision 836056)
+++ src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java	(working copy)
@@ -110,7 +110,7 @@
    *
    * @return A map between field names and a Map.  The sub-Map key is the position as the integer, the value is {@link org.apache.lucene.index.PositionBasedTermVectorMapper.TVPositionInfo}.
    */
-  public Map<String, Map<Integer, TVPositionInfo>>  getFieldToTerms() {
+  public Map<String,Map<Integer,TVPositionInfo>>  getFieldToTerms() {
     return fieldToTerms;
   }
 
