lucene-java-commits mailing list archives

From mikemcc...@apache.org
Subject svn commit: r887181 [1/2] - in /lucene/java/trunk: ./ src/java/org/apache/lucene/index/ src/java/org/apache/lucene/search/ src/java/org/apache/lucene/util/ src/test/org/apache/lucene/ src/test/org/apache/lucene/analysis/ src/test/org/apache/lucene/anal...
Date Fri, 04 Dec 2009 13:08:04 GMT
Author: mikemccand
Date: Fri Dec  4 13:07:47 2009
New Revision: 887181

URL: http://svn.apache.org/viewvc?rev=887181&view=rev
Log:
LUCENE-2065: use generics throughout unit tests

Modified:
    lucene/java/trunk/CHANGES.txt
    lucene/java/trunk/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java
    lucene/java/trunk/src/java/org/apache/lucene/search/CachingWrapperFilter.java
    lucene/java/trunk/src/java/org/apache/lucene/util/AttributeImpl.java
    lucene/java/trunk/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
    lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java
    lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
    lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
    lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java
    lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java
    lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestByteSlices.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentMerger.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestTermVectorsReader.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactionRollback.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactions.java
    lucene/java/trunk/src/test/org/apache/lucene/index/TestWordlistLoader.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
    lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java
    lucene/java/trunk/src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
    lucene/java/trunk/src/test/org/apache/lucene/search/CheckHits.java
    lucene/java/trunk/src/test/org/apache/lucene/search/JustCompileSearch.java
    lucene/java/trunk/src/test/org/apache/lucene/search/QueryUtils.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestComplexExplanationsOfNonMatches.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestDocIdSet.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestElevationComparator.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiSearcher.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSimpleExplanationsOfNonMatches.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSort.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermScorer.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestThreadSafe.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTopDocsCollector.java
    lucene/java/trunk/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
    lucene/java/trunk/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/function/TestDocValues.java
    lucene/java/trunk/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
    lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpanExplanations.java
    lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMDirectory.java
    lucene/java/trunk/src/test/org/apache/lucene/store/MockRAMInputStream.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestHugeRamFile.java
    lucene/java/trunk/src/test/org/apache/lucene/store/TestLockFactory.java
    lucene/java/trunk/src/test/org/apache/lucene/util/LocalizedTestCase.java
    lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java
    lucene/java/trunk/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
    lucene/java/trunk/src/test/org/apache/lucene/util/TestNumericUtils.java
    lucene/java/trunk/src/test/org/apache/lucene/util/cache/BaseTestLRU.java
    lucene/java/trunk/src/test/org/apache/lucene/util/cache/TestDoubleBarrelLRUCache.java

Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Fri Dec  4 13:07:47 2009
@@ -66,6 +66,9 @@
 * LUCENE-1844: Speed up the unit tests (Mark Miller, Erick Erickson,
   Mike McCandless)
 
+* LUCENE-2065: Use Java 5 generics throughout our unit tests.  (Kay
+  Kay via Mike McCandless)
+
 ======================= Release 3.0.0 2009-11-25 =======================
 
 Changes in backwards compatibility policy
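
For illustration, a minimal self-contained sketch (hypothetical class and variable names, plain java.util types only, not code from this commit) of the raw-type-to-generics conversion the entry above describes: parameterized collections remove the casts the raw types required.

import java.util.ArrayList;
import java.util.List;

public class RawToGenerics {
  public static void main(String[] args) {
    // Before (pre-Java 5 style): raw type, cast required when reading.
    List rawNames = new ArrayList();
    rawNames.add("lucene");
    String first = (String) rawNames.get(0);

    // After: parameterized type, no cast and compile-time type checking.
    List<String> names = new ArrayList<String>();
    names.add("lucene");
    String second = names.get(0);

    System.out.println(first + " " + second);
  }
}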

Modified: lucene/java/trunk/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java Fri Dec  4 13:07:47 2009
@@ -110,7 +110,7 @@
    *
    * @return A map between field names and a Map.  The sub-Map key is the position as the integer, the value is {@link org.apache.lucene.index.PositionBasedTermVectorMapper.TVPositionInfo}.
    */
-  public Map<String, Map<Integer, TVPositionInfo>>  getFieldToTerms() {
+  public Map<String,Map<Integer,TVPositionInfo>>  getFieldToTerms() {
     return fieldToTerms;
   }
 

Modified: lucene/java/trunk/src/java/org/apache/lucene/search/CachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/search/CachingWrapperFilter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/search/CachingWrapperFilter.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/search/CachingWrapperFilter.java Fri Dec  4 13:07:47 2009
@@ -35,7 +35,7 @@
   /**
    * A transient Filter cache (package private because of test)
    */
-  transient Map<IndexReader, DocIdSet> cache;
+  transient Map<IndexReader,DocIdSet> cache;
   
   private final ReentrantLock lock = new ReentrantLock();
 

Modified: lucene/java/trunk/src/java/org/apache/lucene/util/AttributeImpl.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/util/AttributeImpl.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/util/AttributeImpl.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/util/AttributeImpl.java Fri Dec  4 13:07:47 2009
@@ -51,7 +51,7 @@
   @Override
   public String toString() {
     StringBuilder buffer = new StringBuilder();
-    Class clazz = this.getClass();
+    Class<?> clazz = this.getClass();
     Field[] fields = clazz.getDeclaredFields();
     try {
       for (int i = 0; i < fields.length; i++) {

Modified: lucene/java/trunk/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java Fri Dec  4 13:07:47 2009
@@ -212,7 +212,7 @@
       
       if (seen.contains(rf)) continue;
 
-      List kids = getAllDecendentReaderKeys(rf.readerKey);
+      List<Object> kids = getAllDecendentReaderKeys(rf.readerKey);
       for (Object kidKey : kids) {
         ReaderField kid = new ReaderField(kidKey, rf.fieldName);
         
@@ -270,7 +270,7 @@
    * the hierarchy of subReaders building up a list of the objects 
    * returned by obj.getFieldCacheKey()
    */
-  private List getAllDecendentReaderKeys(Object seed) {
+  private List<Object> getAllDecendentReaderKeys(Object seed) {
     List<Object> all = new ArrayList<Object>(17); // will grow as we iter
     all.add(seed);
     for (int i = 0; i < all.size(); i++) {

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java Fri Dec  4 13:07:47 2009
@@ -33,7 +33,6 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Version;
-import org.apache.lucene.util._TestUtil;
 
 /**
  * A very simple demo used in the API documentation (src/java/overview.html).

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java Fri Dec  4 13:07:47 2009
@@ -30,7 +30,6 @@
 import org.apache.lucene.util.Version;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java Fri Dec  4 13:07:47 2009
@@ -19,7 +19,6 @@
  * limitations under the License.
  */
 
-import java.util.Iterator;
 import java.util.Collection;
 import java.io.File;
 import java.io.IOException;
@@ -79,7 +78,7 @@
         writer.commit();
       }
     }
-    IndexCommit cp = (IndexCommit) dp.snapshot();
+    IndexCommit cp = dp.snapshot();
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
@@ -181,7 +180,7 @@
   public void backupIndex(Directory dir, SnapshotDeletionPolicy dp) throws Exception {
     // To backup an index we first take a snapshot:
     try {
-      copyFiles(dir, (IndexCommit) dp.snapshot());
+      copyFiles(dir,  dp.snapshot());
     } finally {
       // Make sure to release the snapshot, otherwise these
       // files will never be deleted during this IndexWriter
@@ -195,10 +194,8 @@
     // While we hold the snapshot, and nomatter how long
     // we take to do the backup, the IndexWriter will
     // never delete the files in the snapshot:
-    Collection files = cp.getFileNames();
-    Iterator it = files.iterator();
-    while(it.hasNext()) {
-      final String fileName = (String) it.next();
+    Collection<String> files = cp.getFileNames();
+    for (final String fileName : files) { 
       // NOTE: in a real backup you would not use
       // readFile; you would need to use something else
       // that copies the file to a backup location.  This
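
The hunk above also replaces an explicit Iterator loop over cp.getFileNames() with an enhanced for loop over the typed collection; a small stand-alone sketch of that conversion (hypothetical class name and file names, java.util only):

import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;

public class IteratorToForEach {
  public static void main(String[] args) {
    Collection<String> files = Arrays.asList("_0.cfs", "segments_2");

    // Before: raw Iterator with a cast on every element.
    Iterator it = files.iterator();
    while (it.hasNext()) {
      String fileName = (String) it.next();
      System.out.println(fileName);
    }

    // After: enhanced for loop over the typed collection, no casts.
    for (String fileName : files) {
      System.out.println(fileName);
    }
  }
}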

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Fri Dec  4 13:07:47 2009
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import java.util.Set;
 import java.io.StringReader;
 import java.io.IOException;
  

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java Fri Dec  4 13:07:47 2009
@@ -1871,7 +1871,7 @@
     };
 
     // Construct input text and expected output tokens
-    List expectedOutputTokens = new ArrayList();
+    List<String> expectedOutputTokens = new ArrayList<String>();
     StringBuilder inputText = new StringBuilder();
     for (int n = 0 ; n < foldings.length ; n += 2) {
       if (n > 0) {
@@ -1892,9 +1892,9 @@
     TokenStream stream = new WhitespaceTokenizer(new StringReader(inputText.toString()));
     ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
-    Iterator expectedIter = expectedOutputTokens.iterator();
+    Iterator<String> expectedIter = expectedOutputTokens.iterator();
     while (expectedIter.hasNext()) {;
-      assertTermEquals((String)expectedIter.next(), filter, termAtt);
+      assertTermEquals(expectedIter.next(), filter, termAtt);
     }
     assertFalse(filter.incrementToken());
   }

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java Fri Dec  4 13:07:47 2009
@@ -18,7 +18,6 @@
 package org.apache.lucene.analysis;
 
 import java.io.StringReader;
-import java.util.List;
 
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java Fri Dec  4 13:07:47 2009
@@ -1,13 +1,9 @@
 package org.apache.lucene.analysis;
 
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+
 import org.apache.lucene.util.Version;
 
-import java.io.StringReader;
 
 /**
  * Copyright 2004 The Apache Software Foundation

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java Fri Dec  4 13:07:47 2009
@@ -30,7 +30,7 @@
 public class TestStopAnalyzer extends BaseTokenStreamTestCase {
   
   private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
-  private Set inValidTokens = new HashSet();
+  private Set<Object> inValidTokens = new HashSet<Object>();
   
   public TestStopAnalyzer(String s) {
     super(s);
@@ -40,7 +40,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     
-    Iterator it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
+    Iterator<?> it = StopAnalyzer.ENGLISH_STOP_WORDS_SET.iterator();
     while(it.hasNext()) {
       inValidTokens.add(it.next());
     }
@@ -59,7 +59,7 @@
   }
 
   public void testStopList() throws IOException {
-    Set stopWordsSet = new HashSet();
+    Set<Object> stopWordsSet = new HashSet<Object>();
     stopWordsSet.add("good");
     stopWordsSet.add("test");
     stopWordsSet.add("analyzer");
@@ -78,7 +78,7 @@
   }
 
   public void testStopListPositions() throws IOException {
-    Set stopWordsSet = new HashSet();
+    Set<Object> stopWordsSet = new HashSet<Object>();
     stopWordsSet.add("good");
     stopWordsSet.add("test");
     stopWordsSet.add("analyzer");

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java Fri Dec  4 13:07:47 2009
@@ -37,7 +37,7 @@
 
   public void testExactCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
-    Set<String> stopWords = new HashSet(Arrays.asList("is", "the", "Time"));
+    Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
     TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, false);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -49,7 +49,7 @@
 
   public void testIgnoreCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
-    Set<String> stopWords = new HashSet(Arrays.asList( "is", "the", "Time" ));
+    Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
     TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopWords, true);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -60,7 +60,7 @@
   public void testStopFilt() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     String[] stopWords = new String[] { "is", "the", "Time" };
-    Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+    Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
     TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
@@ -75,16 +75,16 @@
    */
   public void testStopPositons() throws IOException {
     StringBuilder sb = new StringBuilder();
-    ArrayList a = new ArrayList();
+    ArrayList<String> a = new ArrayList<String>();
     for (int i=0; i<20; i++) {
       String w = English.intToEnglish(i).trim();
       sb.append(w).append(" ");
       if (i%3 != 0) a.add(w);
     }
     log(sb.toString());
-    String stopWords[] = (String[]) a.toArray(new String[0]);
+    String stopWords[] = a.toArray(new String[0]);
     for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
-    Set stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+    Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
     // with increments
     StringReader reader = new StringReader(sb.toString());
     StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(reader), stopSet);
@@ -94,8 +94,8 @@
     stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet);
     doTestStopPositons(stpf,false);
     // with increments, concatenating two stop filters
-    ArrayList a0 = new ArrayList();
-    ArrayList a1 = new ArrayList();
+    ArrayList<String> a0 = new ArrayList<String>();
+    ArrayList<String> a1 = new ArrayList<String>();
     for (int i=0; i<a.size(); i++) {
       if (i%2==0) { 
         a0.add(a.get(i));
@@ -103,12 +103,12 @@
         a1.add(a.get(i));
       }
     }
-    String stopWords0[] = (String[]) a0.toArray(new String[0]);
+    String stopWords0[] =  a0.toArray(new String[0]);
     for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
-    String stopWords1[] = (String[]) a1.toArray(new String[0]);
+    String stopWords1[] =  a1.toArray(new String[0]);
     for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
-    Set stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
-    Set stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
+    Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
+    Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
     reader = new StringReader(sb.toString());
     StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(reader), stopSet0); // first part of the set
     stpf0.setEnablePositionIncrements(true);
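
A stand-alone sketch of the two smaller idioms in the hunk above (hypothetical class name and data, java.util only): building a typed HashSet from Arrays.asList, and relying on the typed List.toArray(new String[0]) overload so the old (String[]) cast can go.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class TypedSetAndToArray {
  public static void main(String[] args) {
    // Typed set built directly from Arrays.asList, no raw HashSet.
    Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));

    // toArray(new String[0]) on a List<String> already returns String[],
    // so the explicit (String[]) cast is unnecessary.
    List<String> words = new ArrayList<String>(stopWords);
    String[] asArray = words.toArray(new String[0]);

    System.out.println(asArray.length);
  }
}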

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java Fri Dec  4 13:07:47 2009
@@ -26,8 +26,7 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.List;
+
 
 /**
  * tests for the TestTeeSinkTokenFilter

Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/tokenattributes/TestSimpleAttributeImpls.java Fri Dec  4 13:07:47 2009
@@ -126,7 +126,7 @@
   }
 
   public static final AttributeImpl assertCopyIsEqual(AttributeImpl att) throws Exception {
-    AttributeImpl copy = (AttributeImpl) att.getClass().newInstance();
+    AttributeImpl copy = att.getClass().newInstance();
     att.copyTo(copy);
     assertEquals("Copied instance must be equal", att, copy);
     assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());

Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestDateTools.java Fri Dec  4 13:07:47 2009
@@ -9,7 +9,6 @@
 import java.util.Locale;
 
 import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.LuceneTestCase;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more

Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestNumberTools.java Fri Dec  4 13:07:47 2009
@@ -29,7 +29,7 @@
     }
 
     public void testMax() {
-        // make sure the constants convert to their equivelents
+        // make sure the constants convert to their equivalents
         assertEquals(Long.MAX_VALUE, NumberTools
                 .stringToLong(NumberTools.MAX_STRING_VALUE));
         assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java Fri Dec  4 13:07:47 2009
@@ -109,7 +109,7 @@
   
   
   
-  public static Map nameValues = null;
+  public static Map<String,Object> nameValues = null;
 
   // ordered list of all the fields...
   // could use LinkedHashMap for this purpose if Java1.4 is OK
@@ -130,17 +130,16 @@
     largeLazyField//placeholder for large field, since this is null.  It must always be last
   };
 
-  // Map<String fieldName, Fieldable field>
-  public static Map all=new HashMap();
-  public static Map indexed=new HashMap();
-  public static Map stored=new HashMap();
-  public static Map unstored=new HashMap();
-  public static Map unindexed=new HashMap();
-  public static Map termvector=new HashMap();
-  public static Map notermvector=new HashMap();
-  public static Map lazy= new HashMap();
-  public static Map noNorms=new HashMap();
-  public static Map noTf=new HashMap();
+  public static Map<String,Fieldable> all     =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> indexed =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> stored  =new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unstored=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> unindexed=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> termvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> notermvector=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> lazy= new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noNorms=new HashMap<String,Fieldable>();
+  public static Map<String,Fieldable> noTf=new HashMap<String,Fieldable>();
 
   static {
     //Initialize the large Lazy Field
@@ -175,14 +174,14 @@
   }
 
 
-  private static void add(Map map, Fieldable field) {
+  private static void add(Map<String,Fieldable> map, Fieldable field) {
     map.put(field.name(), field);
   }
 
 
   static
   {
-    nameValues = new HashMap();
+    nameValues = new HashMap<String,Object>();
     nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
     nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
     nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT);
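
The DocHelper change above parameterizes a group of static maps and the add() helper that fills them; a minimal analogue of the same shape (hypothetical class, String values standing in for Fieldable):

import java.util.HashMap;
import java.util.Map;

public class TypedStaticMaps {
  // Parameterized static maps replace the old raw "Map all = new HashMap();" style.
  static final Map<String,String> stored = new HashMap<String,String>();
  static final Map<String,String> indexed = new HashMap<String,String>();

  // The helper now states what it accepts, so callers cannot mix key/value types.
  private static void add(Map<String,String> map, String name, String value) {
    map.put(name, value);
  }

  public static void main(String[] args) {
    add(stored, "id", "1");
    add(indexed, "content", "aaa");
    System.out.println(stored.get("id") + " " + indexed.size());
  }
}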

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java Fri Dec  4 13:07:47 2009
@@ -20,8 +20,6 @@
 import org.apache.lucene.store.*;
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.search.*;
-import org.apache.lucene.queryParser.*;
 
 import java.util.Random;
 import java.io.File;

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Fri Dec  4 13:07:47 2009
@@ -286,7 +286,7 @@
     for(int i=0;i<35;i++) {
       if (!reader.isDeleted(i)) {
         Document d = reader.document(i);
-        List fields = d.getFields();
+        List<Fieldable> fields = d.getFields();
         if (!oldName.startsWith("19.") &&
             !oldName.startsWith("20.") &&
             !oldName.startsWith("21.") &&
@@ -295,19 +295,19 @@
           if (d.getField("content3") == null) {
             final int numFields = oldName.startsWith("29.") ? 7 : 5;
             assertEquals(numFields, fields.size());
-            Field f = (Field) d.getField("id");
+            Field f =  d.getField("id");
             assertEquals(""+i, f.stringValue());
 
-            f = (Field) d.getField("utf8");
+            f = d.getField("utf8");
             assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
 
-            f = (Field) d.getField("autf8");
+            f =  d.getField("autf8");
             assertEquals("Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", f.stringValue());
         
-            f = (Field) d.getField("content2");
+            f = d.getField("content2");
             assertEquals("here is more content with aaa aaa aaa", f.stringValue());
 
-            f = (Field) d.getField("fie\u2C77ld");
+            f = d.getField("fie\u2C77ld");
             assertEquals("field with non-ascii name", f.stringValue());
           }
         }       

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestByteSlices.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestByteSlices.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestByteSlices.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestByteSlices.java Fri Dec  4 13:07:47 2009
@@ -21,7 +21,7 @@
 public class TestByteSlices extends LuceneTestCase {
 
   private static class ByteBlockAllocator extends ByteBlockPool.Allocator {
-    ArrayList freeByteBlocks = new ArrayList();
+    ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
     
     /* Allocate another byte[] from the shared pool */
     @Override
@@ -31,7 +31,7 @@
       if (0 == size)
         b = new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
       else
-        b = (byte[]) freeByteBlocks.remove(size-1);
+        b =  freeByteBlocks.remove(size-1);
       return b;
     }
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java Fri Dec  4 13:07:47 2009
@@ -58,7 +58,7 @@
       fail();
     }
     
-    final CheckIndex.Status.SegmentInfoStatus seg = (CheckIndex.Status.SegmentInfoStatus) indexStatus.segmentInfos.get(0);
+    final CheckIndex.Status.SegmentInfoStatus seg = indexStatus.segmentInfos.get(0);
     assertTrue(seg.openReaderPassed);
 
     assertNotNull(seg.diagnostics);
@@ -84,7 +84,7 @@
     assertEquals(18, seg.termVectorStatus.totVectors);
 
     assertTrue(seg.diagnostics.size() > 0);
-    final List onlySegments = new ArrayList();
+    final List<String> onlySegments = new ArrayList<String>();
     onlySegments.add("_0");
     
     assertTrue(checker.checkIndex(onlySegments).clean == true);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Fri Dec  4 13:07:47 2009
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.Collection;
@@ -43,14 +42,14 @@
 
 public class TestDeletionPolicy extends LuceneTestCase
 {
-  private void verifyCommitOrder(List commits) throws IOException {
-    final IndexCommit firstCommit = ((IndexCommit) commits.get(0));
+  private void verifyCommitOrder(List<? extends IndexCommit> commits) throws IOException {
+    final IndexCommit firstCommit =  commits.get(0);
     long last = SegmentInfos.generationFromSegmentsFileName(firstCommit.getSegmentsFileName());
     assertEquals(last, firstCommit.getGeneration());
     long lastVersion = firstCommit.getVersion();
     long lastTimestamp = firstCommit.getTimestamp();
     for(int i=1;i<commits.size();i++) {
-      final IndexCommit commit = ((IndexCommit) commits.get(i));
+      final IndexCommit commit =  commits.get(i);
       long now = SegmentInfos.generationFromSegmentsFileName(commit.getSegmentsFileName());
       long nowVersion = commit.getVersion();
       long nowTimestamp = commit.getTimestamp();
@@ -68,12 +67,12 @@
     int numOnInit;
     int numOnCommit;
     Directory dir;
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
     }
-    public void onCommit(List commits) throws IOException {
-      IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
+      IndexCommit lastCommit =  commits.get(commits.size()-1);
       IndexReader r = IndexReader.open(dir, true);
       assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
       r.close();
@@ -89,18 +88,16 @@
   class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
     int numOnInit;
     int numOnCommit;
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
       // On init, delete all commit points:
-      Iterator it = commits.iterator();
-      while(it.hasNext()) {
-        final IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         commit.delete();
         assertTrue(commit.isDeleted());
       }
     }
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       int size = commits.size();
       // Delete all but last one:
@@ -116,25 +113,25 @@
     int numOnCommit;
     int numToKeep;
     int numDelete;
-    Set seen = new HashSet();
+    Set<String> seen = new HashSet<String>();
 
     public KeepLastNDeletionPolicy(int numToKeep) {
       this.numToKeep = numToKeep;
     }
 
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       numOnInit++;
       // do no deletions on init
       doDeletes(commits, false);
     }
 
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       doDeletes(commits, true);
     }
     
-    private void doDeletes(List commits, boolean isCommit) {
+    private void doDeletes(List<? extends IndexCommit> commits, boolean isCommit) {
 
       // Assert that we really are only called for each new
       // commit:
@@ -169,23 +166,21 @@
       this.expirationTimeSeconds = seconds;
     }
 
-    public void onInit(List commits) throws IOException {
+    public void onInit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
       onCommit(commits);
     }
 
-    public void onCommit(List commits) throws IOException {
+    public void onCommit(List<? extends IndexCommit> commits) throws IOException {
       verifyCommitOrder(commits);
 
-      IndexCommit lastCommit = (IndexCommit) commits.get(commits.size()-1);
+      IndexCommit lastCommit = commits.get(commits.size()-1);
 
       // Any commit older than expireTime should be deleted:
       double expireTime = dir.fileModified(lastCommit.getSegmentsFileName())/1000.0 - expirationTimeSeconds;
 
-      Iterator it = commits.iterator();
 
-      while(it.hasNext()) {
-        IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         double modTime = dir.fileModified(commit.getSegmentsFileName())/1000.0;
         if (commit != lastCommit && modTime < expireTime) {
           commit.delete();
@@ -297,14 +292,12 @@
       assertEquals(2, policy.numOnCommit);
 
       // Test listCommits
-      Collection commits = IndexReader.listCommits(dir);
+      Collection<IndexCommit> commits = IndexReader.listCommits(dir);
       // 1 from opening writer + 2 from closing writer
       assertEquals(3, commits.size());
 
-      Iterator it = commits.iterator();
       // Make sure we can open a reader on each commit:
-      while(it.hasNext()) {
-        IndexCommit commit = (IndexCommit) it.next();
+      for (final IndexCommit commit : commits) {
         IndexReader r = IndexReader.open(commit, null, false);
         r.close();
       }
@@ -356,12 +349,10 @@
     }
     writer.close();
 
-    Collection commits = IndexReader.listCommits(dir);
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
     assertEquals(6, commits.size());
     IndexCommit lastCommit = null;
-    Iterator it = commits.iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
+    for (final IndexCommit commit : commits) {
       if (lastCommit == null || commit.getGeneration() > lastCommit.getGeneration())
         lastCommit = commit;
     }
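
The IndexDeletionPolicy callbacks above now accept List<? extends IndexCommit>; a small sketch of what the bounded wildcard buys (placeholder Commit classes, not the Lucene types):

import java.util.Arrays;
import java.util.List;

public class BoundedWildcard {
  static class Commit {
    long generation() { return 1L; }
  }
  static class SpecialCommit extends Commit { }

  // Accepting List<? extends Commit> lets callers pass a list of any Commit subtype;
  // elements read from it are already typed, so no casts are needed.
  static void onCommit(List<? extends Commit> commits) {
    for (Commit c : commits) {
      System.out.println(c.generation());
    }
  }

  public static void main(String[] args) {
    List<SpecialCommit> commits = Arrays.asList(new SpecialCommit(), new SpecialCommit());
    onCommit(commits); // would not compile if the parameter were List<Commit>
  }
}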

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java Fri Dec  4 13:07:47 2009
@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
-import java.util.Iterator;
+
 import java.util.LinkedList;
 import java.util.List;
 
@@ -48,7 +48,7 @@
 
     private File workDir;
     private File indexDir;
-    private LinkedList files;
+    private LinkedList<File> files;
 
 
     /** Set the test case. This test case needs
@@ -66,7 +66,7 @@
         Directory directory = FSDirectory.open(indexDir);
         directory.close();
 
-        files = new LinkedList();
+        files = new LinkedList<File>();
         files.add(createOutput("test.txt",
             "This is the first test file"
         ));
@@ -188,9 +188,9 @@
       merger.closeReaders();
       
       if (useCompoundFile) {
-        List filesToDelete = merger.createCompoundFile(merged + ".cfs");
-        for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
-          si1.dir.deleteFile((String) iter.next());
+        List<String> filesToDelete = merger.createCompoundFile(merged + ".cfs");
+        for (final String fileToDelete : filesToDelete) 
+          si1.dir.deleteFile(fileToDelete);
       }
 
       return new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir, useCompoundFile, true);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java Fri Dec  4 13:07:47 2009
@@ -100,10 +100,10 @@
     FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
     assertTrue(reader != null);
     assertTrue(reader.size() == 1);
-    Set loadFieldNames = new HashSet();
+    Set<String> loadFieldNames = new HashSet<String>();
     loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
     loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     //new String[]{DocHelper.LARGE_LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_KEY, DocHelper.LAZY_FIELD_BINARY_KEY};
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
@@ -150,10 +150,10 @@
     FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
     assertTrue(reader != null);
     assertTrue(reader.size() == 1);
-    Set loadFieldNames = new HashSet();
+    Set<String> loadFieldNames = new HashSet<String>();
     loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
     loadFieldNames.add(DocHelper.TEXT_FIELD_UTF1_KEY);
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_KEY);
     lazyFieldNames.add(DocHelper.LAZY_FIELD_BINARY_KEY);
@@ -183,9 +183,10 @@
     Document doc = reader.doc(0, fieldSelector);
     assertTrue("doc is null and it shouldn't be", doc != null);
     int count = 0;
-    List l = doc.getFields();
-    for (Iterator iter = l.iterator(); iter.hasNext();) {
-      Field field = (Field) iter.next();
+    List<Fieldable> l = doc.getFields();
+    for (final Fieldable fieldable : l ) {
+      Field field = (Field) fieldable;
+
       assertTrue("field is null and it shouldn't be", field != null);
       String sv = field.stringValue();
       assertTrue("sv is null and it shouldn't be", sv != null);
@@ -220,9 +221,9 @@
     long lazyTime = 0;
     long regularTime = 0;
     int length = 50;
-    Set lazyFieldNames = new HashSet();
+    Set<String> lazyFieldNames = new HashSet<String>();
     lazyFieldNames.add(DocHelper.LARGE_LAZY_FIELD_KEY);
-    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections.EMPTY_SET, lazyFieldNames);
+    SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(Collections. <String> emptySet(), lazyFieldNames);
 
     for (int i = 0; i < length; i++) {
       reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
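
One line in the hunk above swaps the raw Collections.EMPTY_SET constant for Collections.<String>emptySet(); a tiny sketch of the difference (hypothetical consumer method):

import java.util.Collections;
import java.util.Set;

public class ExplicitTypeWitness {
  static int count(Set<String> names) {
    return names.size();
  }

  public static void main(String[] args) {
    // The raw Collections.EMPTY_SET would need an unchecked assignment here;
    // the explicit type argument yields a properly typed empty Set<String>.
    Set<String> empty = Collections.<String>emptySet();
    System.out.println(count(empty));
  }
}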

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Fri Dec  4 13:07:47 2009
@@ -28,7 +28,6 @@
 import org.apache.lucene.document.Field;
 import java.io.*;
 import java.util.*;
-import java.util.zip.*;
 
 /*
   Verify we can read the pre-2.1 file format, do searches
@@ -155,33 +154,34 @@
     Arrays.sort(files);
     Arrays.sort(files2);
     
-    Set dif = difFiles(files, files2);
+    Set<String> dif = difFiles(files, files2);
     
     if (!Arrays.equals(files, files2)) {
       fail("IndexFileDeleter failed to delete unreferenced extra files: should have deleted " + (filesPre.length-files.length) + " files but only deleted " + (filesPre.length - files2.length) + "; expected files:\n    " + asString(files) + "\n  actual files:\n    " + asString(files2)+"\ndif: "+dif);
     }
   }
 
-  private static Set difFiles(String[] files1, String[] files2) {
-    Set set1 = new HashSet();
-    Set set2 = new HashSet();
-    Set extra = new HashSet();
+  private static Set<String> difFiles(String[] files1, String[] files2) {
+    Set<String> set1 = new HashSet<String>();
+    Set<String> set2 = new HashSet<String>();
+    Set<String> extra = new HashSet<String>();
+    
     for (int x=0; x < files1.length; x++) {
       set1.add(files1[x]);
     }
     for (int x=0; x < files2.length; x++) {
       set2.add(files2[x]);
     }
-    Iterator i1 = set1.iterator();
+    Iterator<String> i1 = set1.iterator();
     while (i1.hasNext()) {
-      Object o = i1.next();
+      String o = i1.next();
       if (!set2.contains(o)) {
         extra.add(o);
       }
     }
-    Iterator i2 = set2.iterator();
+    Iterator<String> i2 = set2.iterator();
     while (i2.hasNext()) {
-      Object o = i2.next();
+      String o = i2.next();
       if (!set1.contains(o)) {
         extra.add(o);
       }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Fri Dec  4 13:07:47 2009
@@ -25,9 +25,11 @@
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.Set;
+import java.util.SortedSet;
 
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
@@ -72,7 +74,7 @@
     public void testCommitUserData() throws Exception {
       RAMDirectory d = new MockRAMDirectory();
 
-      Map commitUserData = new HashMap();
+      Map<String,String> commitUserData = new HashMap<String,String>();
       commitUserData.put("foo", "fighters");
       
       // set up writer
@@ -156,7 +158,7 @@
         writer.close();
         // set up reader
         IndexReader reader = IndexReader.open(d, false);
-        Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
+        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
         assertTrue(fieldNames.contains("keyword"));
         assertTrue(fieldNames.contains("text"));
         assertTrue(fieldNames.contains("unindexed"));
@@ -260,12 +262,12 @@
     IndexReader reader = IndexReader.open(d, false);
     FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
     reader.getTermFreqVector(0, mapper);
-    Map map = mapper.getFieldToTerms();
+    Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
     assertTrue("map is null and it shouldn't be", map != null);
     assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
-    Set set = (Set) map.get("termvector");
-    for (Iterator iterator = set.iterator(); iterator.hasNext();) {
-      TermVectorEntry entry = (TermVectorEntry) iterator.next();
+    Set<TermVectorEntry> set = map.get("termvector");
+    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
+      TermVectorEntry entry =  iterator.next();
       assertTrue("entry is null and it shouldn't be", entry != null);
       System.out.println("Entry: " + entry);
     }
@@ -380,9 +382,9 @@
         for (int i = 0; i < bin.length; i++) {
           assertEquals(bin[i], data1[i + b1.getBinaryOffset()]);
         }
-        Set lazyFields = new HashSet();
+        Set<String> lazyFields = new HashSet<String>();
         lazyFields.add("bin1");
-        FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields);
+        FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields);
         doc = reader.document(reader.maxDoc() - 1, sel);
         Fieldable[] fieldables = doc.getFieldables("bin1");
         assertNotNull(fieldables);
@@ -1340,19 +1342,19 @@
       assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());
       
       // check field names
-      Collection fields1 = index1.getFieldNames(FieldOption.ALL);
-      Collection fields2 = index1.getFieldNames(FieldOption.ALL);
+      Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
+      Collection<String> fields2 = index1.getFieldNames(FieldOption.ALL);
       assertEquals("IndexReaders have different numbers of fields.", fields1.size(), fields2.size());
-      Iterator it1 = fields1.iterator();
-      Iterator it2 = fields1.iterator();
+      Iterator<String> it1 = fields1.iterator();
+      Iterator<String> it2 = fields1.iterator();
       while (it1.hasNext()) {
-        assertEquals("Different field names.", (String) it1.next(), (String) it2.next());
+        assertEquals("Different field names.", it1.next(), it2.next());
       }
       
       // check norms
       it1 = fields1.iterator();
       while (it1.hasNext()) {
-        String curField = (String) it1.next();
+        String curField = it1.next();
         byte[] norms1 = index1.norms(curField);
         byte[] norms2 = index2.norms(curField);
         if (norms1 != null && norms2 != null)
@@ -1378,14 +1380,14 @@
         if (!index1.isDeleted(i)) {
           Document doc1 = index1.document(i);
           Document doc2 = index2.document(i);
-          fields1 = doc1.getFields();
-          fields2 = doc2.getFields();
-          assertEquals("Different numbers of fields for doc " + i + ".", fields1.size(), fields2.size());
-          it1 = fields1.iterator();
-          it2 = fields2.iterator();
-          while (it1.hasNext()) {
-            Field curField1 = (Field) it1.next();
-            Field curField2 = (Field) it2.next();
+          List<Fieldable> fieldable1 = doc1.getFields();
+          List<Fieldable> fieldable2 = doc2.getFields();
+          assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
+          Iterator<Fieldable> itField1 = fieldable1.iterator();
+          Iterator<Fieldable> itField2 = fieldable2.iterator();
+          while (itField1.hasNext()) {
+            Field curField1 = (Field) itField1.next();
+            Field curField2 = (Field) itField2.next();
             assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
             assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
           }          
@@ -1587,15 +1589,11 @@
     writer.addDocument(createDocument("a"));
     writer.close();
     
-    Collection commits = IndexReader.listCommits(dir);
-    Iterator it = commits.iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
-      Collection files = commit.getFileNames();
-      HashSet seen = new HashSet();
-      Iterator it2 = files.iterator();
-      while(it2.hasNext()) {
-        String fileName = (String) it2.next();
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
+      Collection<String> files = commit.getFileNames();
+      HashSet<String> seen = new HashSet<String>();
+      for (final String fileName : files) { 
         assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName));
         seen.add(fileName);
       }

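The hunk above is representative of the whole patch: a raw Iterator plus per-element casts becomes a typed collection walked with for-each. A minimal, self-contained sketch of that before/after using only JDK collections (the file names are invented sample data, and the duplicate check stands in for the assertion in the test):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Set;

    public class DuplicateFileCheck {
      public static void main(String[] args) {
        List<String> files = new ArrayList<String>();
        files.add("_0.cfs");
        files.add("segments_1");
        files.add("_0.cfs");               // deliberate duplicate

        // Pre-generics style: raw Iterator, explicit cast on every next().
        Set seenRaw = new HashSet();
        Iterator it = files.iterator();
        while (it.hasNext()) {
          String name = (String) it.next();
          seenRaw.add(name);
        }

        // Generified style, as in the patch: no casts, for-each reads cleaner.
        Set<String> seen = new HashSet<String>();
        for (final String name : files) {
          if (!seen.add(name)) {
            System.out.println("file " + name + " was duplicated");
          }
        }
      }
    }
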
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java Fri Dec  4 13:07:47 2009
@@ -55,9 +55,9 @@
 
   private int numDocNorms;
 
-  private ArrayList norms;
+  private ArrayList<Float> norms;
 
-  private ArrayList modifiedNorms;
+  private ArrayList<Float> modifiedNorms;
 
   private float lastNorm = 0;
 
@@ -91,19 +91,19 @@
     Directory dir1 = FSDirectory.open(indexDir1);
     IndexWriter.unlock(dir1);
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
 
     createIndex(dir1);
     doTestNorms(dir1);
 
     // test with a single index: index2
-    ArrayList norms1 = norms;
-    ArrayList modifiedNorms1 = modifiedNorms;
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
     int numDocNorms1 = numDocNorms;
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
     numDocNorms = 0;
 
     File indexDir2 = new File(tempDir, "lucenetestindex2");
@@ -282,10 +282,10 @@
       String field = "f" + i;
       byte b[] = ir.norms(field);
       assertEquals("number of norms mismatches", numDocNorms, b.length);
-      ArrayList storedNorms = (i == 1 ? modifiedNorms : norms);
+      ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
       for (int j = 0; j < b.length; j++) {
         float norm = Similarity.getDefault().decodeNormValue(b[j]);
-        float norm1 = ((Float) storedNorms.get(j)).floatValue();
+        float norm1 = storedNorms.get(j).floatValue();
         assertEquals("stored norm value of " + field + " for doc " + j + " is "
             + norm + " - a mismatch!", norm, norm1, 0.000001);
       }

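The norms lists show the boxing side of the migration: with ArrayList<Float>, get() already returns a Float, so only the unboxing call survives and the (Float) cast goes away. A small JDK-only sketch (the values are made up):

    import java.util.ArrayList;

    public class NormsListDemo {
      public static void main(String[] args) {
        ArrayList<Float> norms = new ArrayList<Float>();
        norms.add(0.5f);                        // autoboxed to Float
        norms.add(Float.valueOf(1.0f));

        // Before generics: float norm = ((Float) norms.get(0)).floatValue();
        float norm = norms.get(0).floatValue(); // no cast needed
        System.out.println(norm);
      }
    }
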
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java Fri Dec  4 13:07:47 2009
@@ -20,9 +20,10 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Iterator;
+
 import java.util.List;
 import java.util.Random;
 import java.util.Map;
@@ -732,13 +733,13 @@
       }      
     };
     
-    final List readers = Collections.synchronizedList(new ArrayList());
+    final List<ReaderCouple> readers = Collections.synchronizedList(new ArrayList<ReaderCouple>());
     IndexReader firstReader = IndexReader.open(dir, false);
     IndexReader reader = firstReader;
     final Random rnd = newRandom();
     
     ReaderThread[] threads = new ReaderThread[n];
-    final Set readersToClose = Collections.synchronizedSet(new HashSet());
+    final Set<IndexReader> readersToClose = Collections.synchronizedSet(new HashSet<IndexReader>());
     
     for (int i = 0; i < n; i++) {
       if (i % 2 == 0) {
@@ -806,7 +807,7 @@
             while (!stopped) {
               int numReaders = readers.size();
               if (numReaders > 0) {
-                ReaderCouple c = (ReaderCouple) readers.get(rnd.nextInt(numReaders));
+                ReaderCouple c = readers.get(rnd.nextInt(numReaders));
                 TestIndexReader.assertIndexEquals(c.newReader, c.refreshedReader);
               }
               
@@ -845,17 +846,15 @@
       
     }
     
-    Iterator it = readersToClose.iterator();
-    while (it.hasNext()) {
-      ((IndexReader) it.next()).close();
+    for (final IndexReader readerToClose : readersToClose) {
+      readerToClose.close();
     }
     
     firstReader.close();
     reader.close();
     
-    it = readersToClose.iterator();
-    while (it.hasNext()) {
-      assertReaderClosed((IndexReader) it.next(), true, true);
+    for (final IndexReader readerToClose : readersToClose) {
+      assertReaderClosed(readerToClose, true, true);
     }
 
     assertReaderClosed(reader, true, true);
@@ -1185,9 +1184,9 @@
   }
 
   private static class KeepAllCommits implements IndexDeletionPolicy {
-    public void onInit(List commits) {
+    public void onInit(List<? extends IndexCommit> commits) {
     }
-    public void onCommit(List commits) {
+    public void onCommit(List<? extends IndexCommit> commits) {
     }
   }
 
@@ -1198,13 +1197,13 @@
       Document doc = new Document();
       doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
       writer.addDocument(doc);
-      Map data = new HashMap();
+      Map<String,String> data = new HashMap<String,String>();
       data.put("index", i+"");
       writer.commit(data);
     }
     for(int i=0;i<4;i++) {
       writer.deleteDocuments(new Term("id", ""+i));
-      Map data = new HashMap();
+      Map<String,String> data = new HashMap<String,String>();
       data.put("index", (4+i)+"");
       writer.commit(data);
     }
@@ -1214,9 +1213,8 @@
     assertEquals(0, r.numDocs());
     assertEquals(4, r.maxDoc());
 
-    Iterator it = IndexReader.listCommits(dir).iterator();
-    while(it.hasNext()) {
-      IndexCommit commit = (IndexCommit) it.next();
+    Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+    for (final IndexCommit commit : commits) {
       IndexReader r2 = r.reopen(commit);
       assertTrue(r2 != r);
 
@@ -1228,13 +1226,13 @@
         // expected
       }
 
-      final Map s = commit.getUserData();
+      final Map<String,String> s = commit.getUserData();
       final int v;
       if (s.size() == 0) {
         // First commit created by IW
         v = -1;
       } else {
-        v = Integer.parseInt((String) s.get("index"));
+        v = Integer.parseInt(s.get("index"));
       }
       if (v < 4) {
         assertEquals(1+v, r2.numDocs());

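KeepAllCommits illustrates the other half of the change: callback interfaces take a bounded wildcard list, List<? extends IndexCommit>, so implementations read elements without casts while callers may pass a list of any commit subclass. A standalone sketch with stand-in types (Commit, Policy and CommitPoint are invented here, not Lucene classes):

    import java.util.ArrayList;
    import java.util.List;

    public class WildcardCallbackDemo {
      interface Commit { String name(); }

      // The callback declares a bounded wildcard, mirroring the patched
      // onInit(List<? extends IndexCommit>) / onCommit(...) signatures.
      interface Policy {
        void onCommit(List<? extends Commit> commits);
      }

      static class CommitPoint implements Commit {
        private final String name;
        CommitPoint(String name) { this.name = name; }
        public String name() { return name; }
      }

      public static void main(String[] args) {
        Policy keepAll = new Policy() {
          public void onCommit(List<? extends Commit> commits) {
            for (Commit c : commits) {        // elements read as Commit, no casts
              System.out.println("keeping " + c.name());
            }
          }
        };

        // A List<CommitPoint> is accepted because of the ? extends bound.
        List<CommitPoint> commits = new ArrayList<CommitPoint>();
        commits.add(new CommitPoint("segments_1"));
        commits.add(new CommitPoint("segments_2"));
        keepAll.onCommit(commits);
      }
    }
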
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Fri Dec  4 13:07:47 2009
@@ -48,6 +48,7 @@
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
@@ -2131,7 +2132,7 @@
         writer.setMergeFactor(2);
 
         final IndexWriter finalWriter = writer;
-        final ArrayList failure = new ArrayList();
+        final ArrayList<Throwable> failure = new ArrayList<Throwable>();
         Thread t1 = new Thread() {
             @Override
             public void run() {
@@ -2160,7 +2161,7 @@
           };
 
         if (failure.size() > 0)
-          throw (Throwable) failure.get(0);
+          throw failure.get(0);
 
         t1.start();
 
@@ -3475,14 +3476,14 @@
       final TermAttribute termAtt = addAttribute(TermAttribute.class);
       final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
       
-      final Iterator tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
+      final Iterator<String> tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
       boolean first = true;
       
       @Override
       public boolean incrementToken() {
         if (!tokens.hasNext()) return false;
         clearAttributes();
-        termAtt.setTermBuffer((String) tokens.next());
+        termAtt.setTermBuffer(tokens.next());
         posIncrAtt.setPositionIncrement(first ? 0 : 1);
         first = false;
         return true;
@@ -3643,7 +3644,7 @@
     Directory dir, dir2;
     final static int NUM_INIT_DOCS = 17;
     IndexWriter writer2;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     volatile boolean didClose;
     final IndexReader[] readers;
     final int NUM_COPY;
@@ -3992,7 +3993,7 @@
     w.setMaxBufferedDocs(2);
     for(int j=0;j<17;j++)
       addDoc(w);
-    Map data = new HashMap();
+    Map<String,String> data = new HashMap<String,String>();
     data.put("label", "test1");
     w.commit(data);
     w.close();
@@ -4040,7 +4041,7 @@
   // LUCENE-1429
   public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
 
-    final List thrown = new ArrayList();
+    final List<Throwable> thrown = new ArrayList<Throwable>();
 
     final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
         @Override
@@ -4562,7 +4563,7 @@
     w.addDocument(doc);
     IndexReader r = w.getReader();
     doc = r.document(0);
-    Iterator it = doc.getFields().iterator();
+    Iterator<Fieldable> it = doc.getFields().iterator();
     assertTrue(it.hasNext());
     Field f = (Field) it.next();
     assertEquals(f.name(), "zzz");

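In the anonymous TokenStream above, the typed iterator itself carries the element type, so next() hands back a String with no cast. A compilable sketch of the same pattern outside Lucene (StringStream is a made-up stand-in for the test's token stream):

    import java.util.Arrays;
    import java.util.Iterator;

    public class TypedIteratorDemo {
      // Consumes one token per call until the iterator is exhausted.
      static class StringStream {
        private final Iterator<String> tokens =
            Arrays.asList("a", "b", "c").iterator();

        String next() {
          return tokens.hasNext() ? tokens.next() : null; // no (String) cast
        }
      }

      public static void main(String[] args) {
        StringStream stream = new StringStream();
        for (String token = stream.next(); token != null; token = stream.next()) {
          System.out.println(token);
        }
      }
    }
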
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Fri Dec  4 13:07:47 2009
@@ -109,7 +109,7 @@
     }
   }
 
-  ThreadLocal doFail = new ThreadLocal();
+  ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();
 
   public class MockIndexWriter extends IndexWriter {
     Random r = new java.util.Random(17);

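ThreadLocal is generified the same way: declared as ThreadLocal<Thread>, get() returns a Thread and set() is checked at compile time, with no cast at the call site. A minimal sketch (the fail-marking idea mirrors the doFail field, but the surrounding test machinery is omitted):

    public class TypedThreadLocalDemo {
      // Marks which thread should be made to fail.
      static final ThreadLocal<Thread> doFail = new ThreadLocal<Thread>();

      public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread() {
          @Override
          public void run() {
            doFail.set(Thread.currentThread());  // checked: must be a Thread
            Thread marked = doFail.get();        // no cast, unlike raw ThreadLocal
            System.out.println("marked to fail: "
                + (marked == Thread.currentThread()));
          }
        };
        worker.start();
        worker.join();
      }
    }
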
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java Fri Dec  4 13:07:47 2009
@@ -306,10 +306,10 @@
     final static int NUM_THREADS = 5;
     final Thread[] threads = new Thread[NUM_THREADS];
     IndexWriter mainWriter;
-    List deletedTerms = new ArrayList();
-    LinkedList toDeleteTerms = new LinkedList();
+    List<Term> deletedTerms = new ArrayList<Term>();
+    LinkedList<Term> toDeleteTerms = new LinkedList<Term>();
     Random random;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     
     public DeleteThreads(IndexWriter mainWriter) throws IOException {
       this.mainWriter = mainWriter;
@@ -326,7 +326,7 @@
     
     Term getDeleteTerm() {
       synchronized (toDeleteTerms) {
-        return (Term)toDeleteTerms.removeFirst();
+        return toDeleteTerms.removeFirst();
       }
     }
     
@@ -373,7 +373,7 @@
     int numDirs;
     final Thread[] threads = new Thread[NUM_THREADS];
     IndexWriter mainWriter;
-    final List failures = new ArrayList();
+    final List<Throwable> failures = new ArrayList<Throwable>();
     IndexReader[] readers;
     boolean didClose = false;
     HeavyAtomicInt count = new HeavyAtomicInt(0);
@@ -723,7 +723,7 @@
     final float SECONDS = 0.5f;
 
     final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
-    final List excs = Collections.synchronizedList(new ArrayList());
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
 
     final Thread[] threads = new Thread[NUM_THREAD];
     for(int i=0;i<NUM_THREAD;i++) {
@@ -787,7 +787,7 @@
     final float SECONDS = 0.5f;
 
     final long endTime = (long) (System.currentTimeMillis() + 1000.*SECONDS);
-    final List excs = Collections.synchronizedList(new ArrayList());
+    final List<Throwable> excs = Collections.synchronizedList(new ArrayList<Throwable>());
 
     final Thread[] threads = new Thread[NUM_THREAD];
     for(int i=0;i<NUM_THREAD;i++) {

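The threaded tests funnel worker exceptions into a shared list; Collections.synchronizedList preserves the element type, so the list can be declared List<Throwable> and the first failure pulled out without a cast. A small sketch with an invented worker body:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class SynchronizedFailuresDemo {
      public static void main(String[] args) throws InterruptedException {
        // Typed, thread-safe failure list, as in the patched tests.
        final List<Throwable> excs =
            Collections.synchronizedList(new ArrayList<Throwable>());

        Thread[] threads = new Thread[4];
        for (int i = 0; i < threads.length; i++) {
          final int id = i;
          threads[i] = new Thread() {
            @Override
            public void run() {
              try {
                if (id == 2) {
                  throw new RuntimeException("simulated failure in thread " + id);
                }
              } catch (Throwable t) {
                excs.add(t);               // accepts any Throwable, nothing else
              }
            }
          };
          threads[i].start();
        }
        for (Thread t : threads) {
          t.join();
        }

        if (!excs.isEmpty()) {
          Throwable first = excs.get(0);   // already a Throwable, no cast
          System.out.println(excs.size() + " worker failure(s), first: " + first);
        }
      }
    }
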
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java Fri Dec  4 13:07:47 2009
@@ -46,7 +46,7 @@
     "this string is a bigger string, mary had a little lamb, little lamb, little lamb!"
   };
 
-  private static Set dataset = new HashSet(Arrays.asList(data));
+  private static Set<String> dataset = new HashSet<String>(Arrays.asList(data));
   
   private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
   
@@ -93,11 +93,11 @@
       Document d = reader.document(docs[i], SELECTOR);
       d.get(MAGIC_FIELD);
       
-      List fields = d.getFields();
-      for (Iterator fi = fields.iterator(); fi.hasNext(); ) {
+      List<Fieldable> fields = d.getFields();
+      for (Iterator<Fieldable> fi = fields.iterator(); fi.hasNext(); ) {
         Fieldable f=null;
         try {
-          f = (Fieldable) fi.next();
+          f = fi.next();
           String fname = f.name();
           String fval = f.stringValue();
           assertNotNull(docs[i]+" FIELD: "+fname, fval);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java Fri Dec  4 13:07:47 2009
@@ -52,8 +52,8 @@
   private Similarity similarityOne;
   private Analyzer anlzr;
   private int numDocNorms;
-  private ArrayList norms; 
-  private ArrayList modifiedNorms; 
+  private ArrayList<Float> norms; 
+  private ArrayList<Float> modifiedNorms; 
   private float lastNorm = 0;
   private float normDelta = (float) 0.001;
 
@@ -85,19 +85,19 @@
     File indexDir1 = new File(tempDir, "lucenetestindex1");
     Directory dir1 = FSDirectory.open(indexDir1);
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
 
     createIndex(dir1);
     doTestNorms(dir1);
 
     // test with a single index: index2
-    ArrayList norms1 = norms;
-    ArrayList modifiedNorms1 = modifiedNorms;
+    ArrayList<Float> norms1 = norms;
+    ArrayList<Float> modifiedNorms1 = modifiedNorms;
     int numDocNorms1 = numDocNorms;
 
-    norms = new ArrayList();
-    modifiedNorms = new ArrayList();
+    norms = new ArrayList<Float>();
+    modifiedNorms = new ArrayList<Float>();
     numDocNorms = 0;
     
     File indexDir2 = new File(tempDir, "lucenetestindex2");
@@ -187,10 +187,10 @@
       String field = "f"+i;
       byte b[] = ir.norms(field);
       assertEquals("number of norms mismatches",numDocNorms,b.length);
-      ArrayList storedNorms = (i==1 ? modifiedNorms : norms);
+      ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
       for (int j = 0; j < b.length; j++) {
         float norm = similarityOne.decodeNormValue(b[j]);
-        float norm1 = ((Float)storedNorms.get(j)).floatValue();
+        float norm1 = storedNorms.get(j).floatValue();
         assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
       }
     }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java Fri Dec  4 13:07:47 2009
@@ -71,7 +71,7 @@
     ParallelReader pr = new ParallelReader();
     pr.add(IndexReader.open(dir1, false));
     pr.add(IndexReader.open(dir2, false));
-    Collection fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
+    Collection<String> fieldNames = pr.getFieldNames(IndexReader.FieldOption.ALL);
     assertEquals(4, fieldNames.size());
     assertTrue(fieldNames.contains("f1"));
     assertTrue(fieldNames.contains("f2"));

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java Fri Dec  4 13:07:47 2009
@@ -382,7 +382,7 @@
      * This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
      */
     private static class PayloadAnalyzer extends Analyzer {
-        Map fieldToData = new HashMap();
+        Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
         
         void setPayloadData(String field, byte[] data, int offset, int length) {
             fieldToData.put(field, new PayloadData(0, data, offset, length));
@@ -394,7 +394,7 @@
         
         @Override
         public TokenStream tokenStream(String fieldName, Reader reader) {
-            PayloadData payload = (PayloadData) fieldToData.get(fieldName);
+            PayloadData payload = fieldToData.get(fieldName);
             TokenStream ts = new WhitespaceTokenizer(reader);
             if (payload != null) {
                 if (payload.numFieldInstancesToSkip == 0) {
@@ -550,10 +550,10 @@
     }
     
     private static class ByteArrayPool {
-        private List pool;
+        private List<byte[]> pool;
         
         ByteArrayPool(int capacity, int size) {
-            pool = new ArrayList();
+            pool = new ArrayList<byte[]>();
             for (int i = 0; i < capacity; i++) {
                 pool.add(new byte[size]);
             }
@@ -572,7 +572,7 @@
         }
     
         synchronized byte[] get() {
-            return (byte[]) pool.remove(0);
+            return pool.remove(0);
         }
         
         synchronized void release(byte[] b) {

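Arrays are ordinary type arguments, so declaring the pool as List<byte[]> lets get() return a byte[] directly, which is all the ByteArrayPool change does. A trimmed-down sketch of such a pool (capacity and buffer sizes here are arbitrary):

    import java.util.ArrayList;
    import java.util.List;

    public class ByteArrayPoolDemo {
      private final List<byte[]> pool = new ArrayList<byte[]>();

      ByteArrayPoolDemo(int capacity, int size) {
        for (int i = 0; i < capacity; i++) {
          pool.add(new byte[size]);        // byte[] is a perfectly good type argument
        }
      }

      synchronized byte[] get() {
        return pool.remove(0);             // was: (byte[]) pool.remove(0)
      }

      synchronized void release(byte[] b) {
        pool.add(b);
      }

      public static void main(String[] args) {
        ByteArrayPoolDemo demo = new ByteArrayPoolDemo(2, 8);
        byte[] buffer = demo.get();
        System.out.println("borrowed a buffer of length " + buffer.length);
        demo.release(buffer);
      }
    }
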
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestPositionBasedTermVectorMapper.java Fri Dec  4 13:07:47 2009
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.BitSet;
-import java.util.Iterator;
 import java.util.Map;
 
 public class TestPositionBasedTermVectorMapper extends LuceneTestCase {
@@ -69,19 +68,19 @@
       mapper.map(token, 1, null, thePositions[i]);
 
     }
-    Map map = mapper.getFieldToTerms();
+    Map<String,Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo>> map = mapper.getFieldToTerms();
     assertTrue("map is null and it shouldn't be", map != null);
     assertTrue("map Size: " + map.size() + " is not: " + 1, map.size() == 1);
-    Map positions = (Map) map.get("test");
+    Map<Integer,PositionBasedTermVectorMapper.TVPositionInfo> positions = map.get("test");
     assertTrue("thePositions is null and it shouldn't be", positions != null);
     
     assertTrue("thePositions Size: " + positions.size() + " is not: " + numPositions, positions.size() == numPositions);
     BitSet bits = new BitSet(numPositions);
-    for (Iterator iterator = positions.entrySet().iterator(); iterator.hasNext();) {
-      Map.Entry entry = (Map.Entry) iterator.next();
-      PositionBasedTermVectorMapper.TVPositionInfo info = (PositionBasedTermVectorMapper.TVPositionInfo) entry.getValue();
+    for (Map.Entry<Integer,PositionBasedTermVectorMapper.TVPositionInfo> entry : positions.entrySet()) {
+    
+      PositionBasedTermVectorMapper.TVPositionInfo info = entry.getValue();
       assertTrue("info is null and it shouldn't be", info != null);
-      int pos = ((Integer) entry.getKey()).intValue();
+      int pos = entry.getKey().intValue();
       bits.set(pos);
       assertTrue(info.getPosition() + " does not equal: " + pos, info.getPosition() == pos);
       assertTrue("info.getOffsets() is null and it shouldn't be", info.getOffsets() != null);

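Nested maps gain the most: with the full type spelled out, the for-each over entrySet() binds both key and value types, and the three casts in the old loop disappear. A compilable sketch with a made-up PositionInfo value class standing in for TVPositionInfo:

    import java.util.HashMap;
    import java.util.Map;

    public class NestedMapDemo {
      // Stand-in for PositionBasedTermVectorMapper.TVPositionInfo.
      static class PositionInfo {
        final int position;
        PositionInfo(int position) { this.position = position; }
      }

      public static void main(String[] args) {
        Map<String, Map<Integer, PositionInfo>> fieldToTerms =
            new HashMap<String, Map<Integer, PositionInfo>>();

        Map<Integer, PositionInfo> positions = new HashMap<Integer, PositionInfo>();
        positions.put(0, new PositionInfo(0));
        positions.put(3, new PositionInfo(3));
        fieldToTerms.put("test", positions);

        // Typed entry iteration: no casts on the key, the value, or the nested map.
        for (Map.Entry<Integer, PositionInfo> entry
             : fieldToTerms.get("test").entrySet()) {
          int pos = entry.getKey().intValue();
          PositionInfo info = entry.getValue();
          System.out.println("position " + pos + " -> " + info.position);
        }
      }
    }
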
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentMerger.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentMerger.java Fri Dec  4 13:07:47 2009
@@ -85,7 +85,7 @@
     assertTrue(termDocs != null);
     assertTrue(termDocs.next() == true);
     
-    Collection stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
+    Collection<String> stored = mergedReader.getFieldNames(IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR);
     assertTrue(stored != null);
     //System.out.println("stored size: " + stored.size());
     assertTrue("We do not have 3 fields that were indexed with term vector",stored.size() == 3);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentReader.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentReader.java Fri Dec  4 13:07:47 2009
@@ -62,9 +62,8 @@
     //There are 2 unstored fields on the document that are not preserved across writing
     assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
     
-    List fields = result.getFields();
-    for (Iterator iter = fields.iterator(); iter.hasNext();) {
-      Fieldable field = (Fieldable) iter.next();
+    List<Fieldable> fields = result.getFields();
+    for (final Fieldable field : fields) {
       assertTrue(field != null);
       assertTrue(DocHelper.nameValues.containsKey(field.name()));
     }
@@ -84,19 +83,19 @@
   }    
   
   public void testGetFieldNameVariations() {
-    Collection result = reader.getFieldNames(IndexReader.FieldOption.ALL);
+    Collection<String> result = reader.getFieldNames(IndexReader.FieldOption.ALL);
     assertTrue(result != null);
     assertTrue(result.size() == DocHelper.all.size());
-    for (Iterator iter = result.iterator(); iter.hasNext();) {
-      String s = (String) iter.next();
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s = iter.next();
       //System.out.println("Name: " + s);
       assertTrue(DocHelper.nameValues.containsKey(s) == true || s.equals(""));
     }                                                                               
     result = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
     assertTrue(result != null);
     assertTrue(result.size() == DocHelper.indexed.size());
-    for (Iterator iter = result.iterator(); iter.hasNext();) {
-      String s = (String) iter.next();
+    for (Iterator<String> iter = result.iterator(); iter.hasNext();) {
+      String s = iter.next();
       assertTrue(DocHelper.indexed.containsKey(s) == true || s.equals(""));
     }
     

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java Fri Dec  4 13:07:47 2009
@@ -21,7 +21,6 @@
 import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.search.*;
-import org.apache.lucene.queryParser.*;
 
 import java.util.Random;
 import java.io.File;
@@ -155,7 +154,7 @@
     modifier.close();
 
     for(int i=0;i<numThread;i++)
-      assertTrue(!((TimedThread) threads[i]).failed);
+      assertTrue(!threads[i].failed);
 
     //System.out.println("    Writer: " + indexerThread.count + " iterations");
     //System.out.println("Searcher 1: " + searcherThread1.count + " searchers created");

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java?rev=887181&r1=887180&r2=887181&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java Fri Dec  4 13:07:47 2009
@@ -73,7 +73,7 @@
     // dir1 = FSDirectory.open("foofoofoo");
     Directory dir2 = new MockRAMDirectory();
     // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
-    Map docs = indexRandom(10, 10, 100, dir1);
+    Map<String,Document> docs = indexRandom(10, 10, 100, dir1);
     indexSerial(docs, dir2);
 
     // verifying verify
@@ -97,7 +97,7 @@
       int range=r.nextInt(20)+1;
       Directory dir1 = new MockRAMDirectory();
       Directory dir2 = new MockRAMDirectory();
-      Map docs = indexRandom(nThreads, iter, range, dir1);
+      Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1);
       indexSerial(docs, dir2);
       verifyEquals(dir1, dir2, "id");
     }
@@ -106,9 +106,9 @@
 
   static Term idTerm = new Term("id","");
   IndexingThread[] threads;
-  static Comparator fieldNameComparator = new Comparator() {
-        public int compare(Object o1, Object o2) {
-          return ((Fieldable)o1).name().compareTo(((Fieldable)o2).name());
+  static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
+        public int compare(Fieldable o1, Fieldable o2) {
+          return o1.name().compareTo(o2.name());
         }
   };
 
@@ -117,12 +117,12 @@
   // everything.
   
   public static class DocsAndWriter {
-    Map docs;
+    Map<String,Document> docs;
     IndexWriter writer;
   }
   
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map docs = new HashMap();
+    Map<String,Document> docs = new HashMap<String,Document>();
     IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
 
@@ -172,8 +172,8 @@
     return dw;
   }
   
-  public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
-    Map docs = new HashMap();
+  public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
+    Map<String,Document> docs = new HashMap<String,Document>();
     for(int iter=0;iter<3;iter++) {
       IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false);
@@ -217,14 +217,14 @@
   }
 
   
-  public static void indexSerial(Map docs, Directory dir) throws IOException {
+  public static void indexSerial(Map<String,Document> docs, Directory dir) throws IOException {
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
 
     // index all docs in a single thread
-    Iterator iter = docs.values().iterator();
+    Iterator<Document> iter = docs.values().iterator();
     while (iter.hasNext()) {
-      Document d = (Document)iter.next();
-      ArrayList fields = new ArrayList();
+      Document d = iter.next();
+      ArrayList<Fieldable> fields = new ArrayList<Fieldable>();
       fields.addAll(d.getFields());
       // put fields in same order each time
       Collections.sort(fields, fieldNameComparator);
@@ -232,7 +232,7 @@
       Document d1 = new Document();
       d1.setBoost(d.getBoost());
       for (int i=0; i<fields.size(); i++) {
-        d1.add((Fieldable) fields.get(i));
+        d1.add(fields.get(i));
       }
       w.addDocument(d1);
       // System.out.println("indexing "+d1);
@@ -391,8 +391,8 @@
   }
 
   public static void verifyEquals(Document d1, Document d2) {
-    List ff1 = d1.getFields();
-    List ff2 = d2.getFields();
+    List<Fieldable> ff1 = d1.getFields();
+    List<Fieldable> ff2 = d2.getFields();
 
     Collections.sort(ff1, fieldNameComparator);
     Collections.sort(ff2, fieldNameComparator);
@@ -405,8 +405,8 @@
 
 
     for (int i=0; i<ff1.size(); i++) {
-      Fieldable f1 = (Fieldable)ff1.get(i);
-      Fieldable f2 = (Fieldable)ff2.get(i);
+      Fieldable f1 = ff1.get(i);
+      Fieldable f2 = ff2.get(i);
       if (f1.isBinary()) {
         assert(f2.isBinary());
         //TODO
@@ -480,7 +480,7 @@
     int base;
     int range;
     int iterations;
-    Map docs = new HashMap();  // Map<String,Document>
+    Map<String,Document> docs = new HashMap<String,Document>();  
     Random r;
 
     public int nextInt(int lim) {
@@ -561,7 +561,7 @@
     public void indexDoc() throws IOException {
       Document d = new Document();
 
-      ArrayList fields = new ArrayList();      
+      ArrayList<Field> fields = new ArrayList<Field>();      
       String idString = getIdString();
       Field idField =  new Field(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
       fields.add(idField);
@@ -609,7 +609,7 @@
       }
 
       for (int i=0; i<fields.size(); i++) {
-        d.add((Fieldable) fields.get(i));
+        d.add(fields.get(i));
       }
       w.updateDocument(idTerm.createTerm(idString), d);
       // System.out.println("indexing "+d);

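Parameterizing the field-name comparator fixes the compare() signature, so the two (Fieldable) casts vanish and Collections.sort is checked against the list's element type. A self-contained sketch (NamedField is an invented stand-in for Fieldable):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public class TypedComparatorDemo {
      // Stand-in for Fieldable: anything with a name.
      static class NamedField {
        final String name;
        NamedField(String name) { this.name = name; }
        String name() { return name; }
      }

      // Mirrors fieldNameComparator in the patch: compare by field name, no casts.
      static final Comparator<NamedField> byName = new Comparator<NamedField>() {
        public int compare(NamedField o1, NamedField o2) {
          return o1.name().compareTo(o2.name());
        }
      };

      public static void main(String[] args) {
        List<NamedField> fields = new ArrayList<NamedField>();
        fields.add(new NamedField("zzz"));
        fields.add(new NamedField("id"));
        fields.add(new NamedField("body"));

        Collections.sort(fields, byName);  // put fields in the same order each time
        for (NamedField f : fields) {
          System.out.println(f.name());
        }
      }
    }
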

