lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From markrmil...@apache.org
Subject svn commit: r827772 [4/6] - in /lucene/java/branches/flex_1458: ./ contrib/ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/ contrib/queries/src/java/org/apache/lucene/s...
Date Tue, 20 Oct 2009 19:58:22 GMT
Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java Tue Oct 20 19:58:18 2009
@@ -20,7 +20,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Iterator;
+
 import java.util.List;
 
 import org.apache.lucene.document.Document;
@@ -57,7 +57,7 @@
   private String segment;
   private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
 
-  private List readers = new ArrayList();
+  private List<IndexReader> readers = new ArrayList<IndexReader>();
   private FieldInfos fieldInfos;
   
   private int mergedDocs;
@@ -176,23 +176,23 @@
    * @throws IOException
    */
   final void closeReaders() throws IOException {
-    for (Iterator iter = readers.iterator(); iter.hasNext();) {
-      ((IndexReader) iter.next()).close();
+    for (final IndexReader reader : readers) {
+      reader.close();
     }
   }
 
-  final List createCompoundFile(String fileName) throws IOException {
+  final List<String> createCompoundFile(String fileName) throws IOException {
     // nocommit -- messy!
     final SegmentWriteState state = new SegmentWriteState(null, directory, segment, fieldInfos, null, mergedDocs, 0, 0, Codecs.getDefault());
     return createCompoundFile(fileName, new SegmentInfo(segment, mergedDocs, directory,
                                                         Codecs.getDefault().getWriter(state)));
   }
 
-  final List createCompoundFile(String fileName, final SegmentInfo info)
+  final List<String> createCompoundFile(String fileName, final SegmentInfo info)
           throws IOException {
     CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
 
-    List files = new ArrayList();
+    List<String> files = new ArrayList<String>();
 
     // Basic files
     for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS_NOT_CODEC.length; i++) {
@@ -229,9 +229,8 @@
     }
 
     // Now merge all added files
-    Iterator it = files.iterator();
-    while (it.hasNext()) {
-      cfsWriter.addFile((String) it.next());
+    for (String file : files) {
+      cfsWriter.addFile(file);
     }
     
     // Perform the merge
@@ -241,13 +240,11 @@
   }
 
   private void addIndexed(IndexReader reader, FieldInfos fInfos,
-      Collection names, boolean storeTermVectors,
+      Collection<String> names, boolean storeTermVectors,
       boolean storePositionWithTermVector, boolean storeOffsetWithTermVector,
       boolean storePayloads, boolean omitTFAndPositions)
       throws IOException {
-    Iterator i = names.iterator();
-    while (i.hasNext()) {
-      String field = (String) i.next();
+    for (String field : names) {
       fInfos.add(field, true, storeTermVectors,
           storePositionWithTermVector, storeOffsetWithTermVector, !reader
               .hasNorms(field), storePayloads, omitTFAndPositions);
@@ -309,8 +306,7 @@
       fieldInfos = new FieldInfos();		  // merge field names
     }
 
-    for (Iterator iter = readers.iterator(); iter.hasNext();) {
-      IndexReader reader = (IndexReader) iter.next();
+    for (IndexReader reader : readers) {
       if (reader instanceof SegmentReader) {
         SegmentReader segmentReader = (SegmentReader) reader;
         FieldInfos readerFieldInfos = segmentReader.fieldInfos();
@@ -345,8 +341,7 @@
 
       try {
         int idx = 0;
-        for (Iterator iter = readers.iterator(); iter.hasNext();) {
-          final IndexReader reader = (IndexReader) iter.next();
+        for (IndexReader reader : readers) {
           final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
           FieldsReader matchingFieldsReader = null;
           if (matchingSegmentReader != null) {
@@ -382,8 +377,8 @@
       // If we are skipping the doc stores, that means there
       // are no deletions in any of these segments, so we
       // just sum numDocs() of each segment to get total docCount
-      for (Iterator iter = readers.iterator(); iter.hasNext();) {
-        docCount += ((IndexReader) iter.next()).numDocs();
+      for (final IndexReader reader : readers) {
+        docCount += reader.numDocs();
       }
 
     return docCount;
@@ -473,7 +468,7 @@
 
     try {
       int idx = 0;
-      for (Iterator iter = readers.iterator(); iter.hasNext();) {
+      for (final IndexReader reader : readers) {
         final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
         TermVectorsReader matchingVectorsReader = null;
         if (matchingSegmentReader != null) {
@@ -484,7 +479,6 @@
             matchingVectorsReader = vectorsReader;
           }
         }
-        final IndexReader reader = (IndexReader) iter.next();
         if (reader.hasDeletions()) {
           copyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
         } else {
@@ -640,7 +634,7 @@
     while (fieldsQueue.size() > 0) {
 
       while(true) {
-        SegmentMergeInfo smi = (SegmentMergeInfo) fieldsQueue.pop();
+        SegmentMergeInfo smi = fieldsQueue.pop();
         if (smi.nextTerm()) {
           termsQueue.add(smi);
         } else if (smi.nextField()) {
@@ -649,7 +643,7 @@
         } else {
           // done with a segment
         }
-        SegmentMergeInfo top = (SegmentMergeInfo) fieldsQueue.top();
+        SegmentMergeInfo top = fieldsQueue.top();
         if (top == null || (termsQueue.size() > 0 && ((SegmentMergeInfo) termsQueue.top()).field != top.field)) {
           break;
         }
@@ -658,7 +652,7 @@
       if (termsQueue.size() > 0) {          
         // merge one field
 
-        final String field  = ((SegmentMergeInfo) termsQueue.top()).field;
+        final String field  = termsQueue.top().field;
         if (Codec.DEBUG) {
           System.out.println("merge field=" + field + " segCount=" + termsQueue.size());
         }
@@ -670,8 +664,8 @@
           // pop matching terms
           int matchSize = 0;
           while(true) {
-            match[matchSize++] = (SegmentMergeInfo) termsQueue.pop();
-            SegmentMergeInfo top = (SegmentMergeInfo) termsQueue.top();
+            match[matchSize++] = termsQueue.pop();
+            SegmentMergeInfo top = termsQueue.top();
             if (top == null || !top.term.termEquals(match[0].term)) {
               break;
             }
@@ -814,8 +808,7 @@
             output = directory.createOutput(segment + "." + IndexFileNames.NORMS_EXTENSION);
             output.writeBytes(NORMS_HEADER,NORMS_HEADER.length);
           }
-          for (Iterator iter = readers.iterator(); iter.hasNext();) {
-            IndexReader reader = (IndexReader) iter.next();
+          for ( IndexReader reader : readers) {
             int maxDoc = reader.maxDoc();
             if (normBuffer == null || normBuffer.length < maxDoc) {
               // the buffer is too small for the current segment

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentReader.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentReader.java Tue Oct 20 19:58:18 2009
@@ -23,7 +23,7 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
+
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -59,8 +59,8 @@
   private int readBufferSize;
   boolean isPreFlex;
 
-  CloseableThreadLocal fieldsReaderLocal = new FieldsReaderLocal();
-  CloseableThreadLocal termVectorsLocal = new CloseableThreadLocal();
+  CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
+  CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
 
   BitVector deletedDocs = null;
   Ref deletedDocsRef = null;
@@ -254,9 +254,9 @@
   /**
    * Sets the initial value 
    */
-  private class FieldsReaderLocal extends CloseableThreadLocal {
-    protected Object initialValue() {
-      return core.getFieldsReaderOrig().clone();
+  private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
+    protected FieldsReader initialValue() {
+      return (FieldsReader) core.getFieldsReaderOrig().clone();
     }
   }
   
@@ -503,7 +503,7 @@
     }
   }
 
-  Map norms = new HashMap();
+  Map<String,Norm> norms = new HashMap<String,Norm>();
   
   /** The class which implements SegmentReader. */
   // @deprecated (LUCENE-1677)
@@ -751,7 +751,7 @@
         }
       }
 
-      clone.norms = new HashMap();
+      clone.norms = new HashMap<String,Norm>();
 
       // Clone norms
       for (int i = 0; i < fieldNormsChanged.length; i++) {
@@ -759,9 +759,9 @@
         // Clone unchanged norms to the cloned reader
         if (doClone || !fieldNormsChanged[i]) {
           final String curField = core.fieldInfos.fieldInfo(i).name;
-          Norm norm = (Norm) this.norms.get(curField);
+          Norm norm = this.norms.get(curField);
           if (norm != null)
-            clone.norms.put(curField, norm.clone());
+            clone.norms.put(curField, (Norm) norm.clone());
         }
       }
 
@@ -781,7 +781,7 @@
     return clone;
   }
 
-  protected void doCommit(Map commitUserData) throws IOException {
+  protected void doCommit(Map<String,String> commitUserData) throws IOException {
     if (hasChanges) {
       if (deletedDocsDirty) {               // re-write deleted
         si.advanceDelGen();
@@ -800,9 +800,7 @@
 
       if (normsDirty) {               // re-write norms
         si.setNumFields(core.fieldInfos.size());
-        Iterator it = norms.values().iterator();
-        while (it.hasNext()) {
-          Norm norm = (Norm) it.next();
+        for (final Norm norm : norms.values()) {
           if (norm.dirty) {
             norm.reWrite(si);
           }
@@ -815,7 +813,7 @@
   }
 
   FieldsReader getFieldsReader() {
-    return (FieldsReader) fieldsReaderLocal.get();
+    return fieldsReaderLocal.get();
   }
 
   protected void doClose() throws IOException {
@@ -828,9 +826,8 @@
       deletedDocs = null;
     }
 
-    Iterator it = norms.values().iterator();
-    while (it.hasNext()) {
-      ((Norm) it.next()).decRef();
+    for (final Norm norm : norms.values()) {
+      norm.decRef();
     }
     if (core != null) {
       core.decRef();
@@ -890,8 +887,8 @@
     }
   }
 
-  List files() throws IOException {
-    return new ArrayList(si.files());
+  List<String> files() throws IOException {
+    return new ArrayList<String>(si.files());
   }
 
   public TermEnum terms() throws IOException {
@@ -1042,10 +1039,10 @@
   /**
    * @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption)
    */
-  public Collection getFieldNames(IndexReader.FieldOption fieldOption) {
+  public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
     ensureOpen();
 
-    Set fieldSet = new HashSet();
+    Set<String> fieldSet = new HashSet<String>();
     for (int i = 0; i < core.fieldInfos.size(); i++) {
       FieldInfo fi = core.fieldInfos.fieldInfo(i);
       if (fieldOption == IndexReader.FieldOption.ALL) {
@@ -1105,7 +1102,7 @@
 
   // can return null if norms aren't stored
   protected synchronized byte[] getNorms(String field) throws IOException {
-    Norm norm = (Norm) norms.get(field);
+    Norm norm = norms.get(field);
     if (norm == null) return null;  // not indexed, or norms not stored
     return norm.bytes();
   }
@@ -1119,7 +1116,7 @@
 
   protected void doSetNorm(int doc, String field, byte value)
           throws IOException {
-    Norm norm = (Norm) norms.get(field);
+    Norm norm = norms.get(field);
     if (norm == null)                             // not an indexed field
       return;
 
@@ -1132,7 +1129,7 @@
     throws IOException {
 
     ensureOpen();
-    Norm norm = (Norm) norms.get(field);
+    Norm norm = norms.get(field);
     if (norm == null) {
       Arrays.fill(bytes, offset, bytes.length, DefaultSimilarity.encodeNorm(1.0f));
       return;
@@ -1201,9 +1198,7 @@
     if (singleNormStream != null) {
       return false;
     }
-    Iterator it = norms.values().iterator();
-    while (it.hasNext()) {
-      Norm norm = (Norm) it.next();
+    for (final Norm norm : norms.values()) {
       if (norm.refCount > 0) {
         return false;
       }
@@ -1213,8 +1208,7 @@
 
   // for testing only
   boolean normsClosed(String field) {
-    Norm norm = (Norm) norms.get(field);
-    return norm.refCount == 0;
+    return norms.get(field).refCount == 0;
   }
 
   /**
@@ -1222,7 +1216,7 @@
    * @return TermVectorsReader
    */
   TermVectorsReader getTermVectorsReader() {
-    TermVectorsReader tvReader = (TermVectorsReader) termVectorsLocal.get();
+    TermVectorsReader tvReader = termVectorsLocal.get();
     if (tvReader == null) {
       TermVectorsReader orig = core.getTermVectorsReaderOrig();
       if (orig == null) {
@@ -1330,9 +1324,7 @@
     rollbackDeletedDocsDirty = deletedDocsDirty;
     rollbackNormsDirty = normsDirty;
     rollbackPendingDeleteCount = pendingDeleteCount;
-    Iterator it = norms.values().iterator();
-    while (it.hasNext()) {
-      Norm norm = (Norm) it.next();
+    for (Norm norm : norms.values()) {
       norm.rollbackDirty = norm.dirty;
     }
   }
@@ -1342,9 +1334,7 @@
     deletedDocsDirty = rollbackDeletedDocsDirty;
     normsDirty = rollbackNormsDirty;
     pendingDeleteCount = rollbackPendingDeleteCount;
-    Iterator it = norms.values().iterator();
-    while (it.hasNext()) {
-      Norm norm = (Norm) it.next();
+    for (Norm norm : norms.values()) {
       norm.dirty = norm.rollbackDirty;
     }
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentWriteState.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentWriteState.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentWriteState.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentWriteState.java Tue Oct 20 19:58:18 2009
@@ -42,7 +42,7 @@
   public int numDocs;
   int numDocsInStore;
   // nocommit -- made public
-  public Collection flushedFiles;
+  public Collection<String> flushedFiles;
 
   // Actual codec used
   Codec codec;
@@ -82,7 +82,7 @@
     this.numDocsInStore = numDocsInStore;
     this.termIndexInterval = termIndexInterval;
     this.codec = codecs.getWriter(this);
-    flushedFiles = new HashSet();
+    flushedFiles = new HashSet<String>();
   }
 
   public String segmentFileName(String ext) {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java Tue Oct 20 19:58:18 2009
@@ -52,14 +52,14 @@
     this.primary = primary;
   }
 
-  public synchronized void onInit(List commits) throws IOException {
+  public synchronized void onInit(List<? extends IndexCommit> commits) throws IOException {
     primary.onInit(wrapCommits(commits));
-    lastCommit = (IndexCommit) commits.get(commits.size()-1);
+    lastCommit = commits.get(commits.size()-1);
   }
 
-  public synchronized void onCommit(List commits) throws IOException {
+  public synchronized void onCommit(List<? extends IndexCommit> commits) throws IOException {
     primary.onCommit(wrapCommits(commits));
-    lastCommit = (IndexCommit) commits.get(commits.size()-1);
+    lastCommit = commits.get(commits.size()-1);
   }
 
   /** Take a snapshot of the most recent commit to the
@@ -95,7 +95,7 @@
     public String getSegmentsFileName() {
       return cp.getSegmentsFileName();
     }
-    public Collection getFileNames() throws IOException {
+    public Collection<String> getFileNames() throws IOException {
       return cp.getFileNames();
     }
     public Directory getDirectory() {
@@ -118,16 +118,16 @@
     public long getGeneration() {
       return cp.getGeneration();
     }
-    public Map getUserData() throws IOException {
+    public Map<String,String> getUserData() throws IOException {
       return cp.getUserData();
     }
   }
 
-  private List wrapCommits(List commits) {
+  private List<IndexCommit> wrapCommits(List<? extends IndexCommit> commits) {
     final int count = commits.size();
-    List myCommits = new ArrayList(count);
+    List<IndexCommit> myCommits = new ArrayList<IndexCommit>(count);
     for(int i=0;i<count;i++)
-      myCommits.add(new MyCommitPoint((IndexCommit) commits.get(i)));
+      myCommits.add(new MyCommitPoint(commits.get(i)));
     return myCommits;
   }
 }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java Tue Oct 20 19:58:18 2009
@@ -29,8 +29,8 @@
 public class SortedTermVectorMapper extends TermVectorMapper{
 
 
-  private SortedSet currentSet;
-  private Map termToTVE = new HashMap();
+  private SortedSet<TermVectorEntry> currentSet;
+  private Map<String,TermVectorEntry> termToTVE = new HashMap<String,TermVectorEntry>();
   private boolean storeOffsets;
   private boolean storePositions;
   /**
@@ -42,14 +42,14 @@
    *
    * @param comparator A Comparator for sorting {@link TermVectorEntry}s
    */
-  public SortedTermVectorMapper(Comparator comparator) {
+  public SortedTermVectorMapper(Comparator<TermVectorEntry> comparator) {
     this(false, false, comparator);
   }
 
 
-  public SortedTermVectorMapper(boolean ignoringPositions, boolean ignoringOffsets, Comparator comparator) {
+  public SortedTermVectorMapper(boolean ignoringPositions, boolean ignoringOffsets, Comparator<TermVectorEntry> comparator) {
     super(ignoringPositions, ignoringOffsets);
-    currentSet = new TreeSet(comparator);
+    currentSet = new TreeSet<TermVectorEntry>(comparator);
   }
 
   /**
@@ -121,7 +121,7 @@
    *
    * @return The SortedSet of {@link TermVectorEntry}.
    */
-  public SortedSet getTermVectorEntrySet()
+  public SortedSet<TermVectorEntry> getTermVectorEntrySet()
   {
     return currentSet;
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermDocs.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermDocs.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermDocs.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermDocs.java Tue Oct 20 19:58:18 2009
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.io.Closeable;
 
 /** TermDocs provides an interface for enumerating &lt;document, frequency&gt;
  pairs for a term.  <p> The document portion names each document containing
@@ -29,7 +30,7 @@
  @deprecated Use {@link DocsEnum} instead
 */
 
-public interface TermDocs {
+public interface TermDocs extends Closeable {
   /** Sets this to the data for a term.
    * The enumeration is reset to the start of the data for this term.
    */

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermEnum.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermEnum.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermEnum.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermEnum.java Tue Oct 20 19:58:18 2009
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.io.Closeable;
 
 /** Abstract class for enumerating terms.
 
@@ -25,7 +26,7 @@
   the enumeration is greater than all that precede it.
 * @deprecated Use TermsEnum instead */
 
-public abstract class TermEnum {
+public abstract class TermEnum implements Closeable {
   /** Increments the enumeration to the next element.  True if one exists.*/
   public abstract boolean next() throws IOException;
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorEntryFreqSortedComparator.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorEntryFreqSortedComparator.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorEntryFreqSortedComparator.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorEntryFreqSortedComparator.java Tue Oct 20 19:58:18 2009
@@ -23,11 +23,9 @@
  * the term (case-sensitive)
  *
  **/
-public class TermVectorEntryFreqSortedComparator implements Comparator {
-  public int compare(Object object, Object object1) {
+public class TermVectorEntryFreqSortedComparator implements Comparator<TermVectorEntry> {
+  public int compare(TermVectorEntry entry, TermVectorEntry entry1) {
     int result = 0;
-    TermVectorEntry entry = (TermVectorEntry) object;
-    TermVectorEntry entry1 = (TermVectorEntry) object1;
     result = entry1.getFrequency() - entry.getFrequency();
     if (result == 0)
     {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Tue Oct 20 19:58:18 2009
@@ -23,7 +23,7 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.Iterator;
+
 import java.util.Map;
 
 final class TermVectorsTermsWriter extends TermsHashConsumer {
@@ -51,7 +51,7 @@
       postings[i] = new PostingList();
   }
 
-  synchronized void flush(Map threadsAndFields, final SegmentWriteState state) throws IOException {
+  synchronized void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
 
     if (tvx != null) {
 
@@ -65,12 +65,9 @@
       tvf.flush();
     }
 
-    Iterator it = threadsAndFields.entrySet().iterator();
-    while(it.hasNext()) {
-      Map.Entry entry = (Map.Entry) it.next();
-      Iterator it2 = ((Collection) entry.getValue()).iterator();
-      while(it2.hasNext()) {
-        TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) it2.next();
+    for (Map.Entry<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> entry : threadsAndFields.entrySet()) {
+      for (final TermsHashConsumerPerField field : entry.getValue() ) {
+        TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) field;
         perField.termsHashPerField.reset();
         perField.shrinkHash();
       }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java Tue Oct 20 19:58:18 2009
@@ -103,7 +103,7 @@
       nextTermsHash.closeDocStore(state);
   }
 
-  synchronized void flush(Map threadsAndFields, final SegmentWriteState state) throws IOException {
+  synchronized void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
     Map childThreadsAndFields = new HashMap();
     Map nextThreadsAndFields;
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHashConsumer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHashConsumer.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHashConsumer.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHashConsumer.java Tue Oct 20 19:58:18 2009
@@ -18,13 +18,14 @@
  */
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Map;
 
 abstract class TermsHashConsumer {
   abstract int bytesPerPosting();
   abstract void createPostings(RawPostingList[] postings, int start, int count);
   abstract TermsHashConsumerPerThread addThread(TermsHashPerThread perThread);
-  abstract void flush(Map threadsAndFields, final SegmentWriteState state) throws IOException;
+  abstract void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException;
   abstract void abort();
   abstract void closeDocStore(SegmentWriteState state) throws IOException;
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/codecs/Codecs.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/codecs/Codecs.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/codecs/Codecs.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/codecs/Codecs.java Tue Oct 20 19:58:18 2009
@@ -83,8 +83,8 @@
   }
 
   public Codec getWriter(SegmentWriteState state) {
-    return lookup("Standard");
-    //return lookup("Pulsing");
+    //return lookup("Standard");
+    return lookup("Pulsing");
     //return lookup("Sep");
     //return lookup("IntBlock");
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/CharStream.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/CharStream.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/CharStream.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/CharStream.java Tue Oct 20 19:58:18 2009
@@ -109,4 +109,4 @@
   void Done();
 
 }
-/* JavaCC - OriginalChecksum=a83909a2403f969f94d18375f9f143e4 (do not edit this line) */
+/* JavaCC - OriginalChecksum=32a89423891f765dde472f7ef0e3ef7b (do not edit this line) */

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/ParseException.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/ParseException.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/ParseException.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/ParseException.java Tue Oct 20 19:58:18 2009
@@ -195,4 +195,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=c63b396885c4ff44d7aa48d3feae60cd (do not edit this line) */
+/* JavaCC - OriginalChecksum=c7631a240f7446940695eac31d9483ca (do not edit this line) */

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java Tue Oct 20 19:58:18 2009
@@ -1754,7 +1754,7 @@
       return (jj_ntk = jj_nt.kind);
   }
 
-  private java.util.List jj_expentries = new java.util.ArrayList();
+  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
@@ -1817,7 +1817,7 @@
     jj_add_error_token(0, 0);
     int[][] exptokseq = new int[jj_expentries.size()][];
     for (int i = 0; i < jj_expentries.size(); i++) {
-      exptokseq[i] = (int[])jj_expentries.get(i);
+      exptokseq[i] = jj_expentries.get(i);
     }
     return new ParseException(token, exptokseq, tokenImage);
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/Token.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/Token.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/Token.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/Token.java Tue Oct 20 19:58:18 2009
@@ -121,4 +121,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=37b1923f964a5a434f5ea3d6952ff200 (do not edit this line) */
+/* JavaCC - OriginalChecksum=c147cc166a7cf8812c7c39bc8c5eb868 (do not edit this line) */

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/TokenMgrError.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/TokenMgrError.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/TokenMgrError.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/TokenMgrError.java Tue Oct 20 19:58:18 2009
@@ -3,6 +3,7 @@
 package org.apache.lucene.queryParser;
 
 /** Token Manager Error. */
+@SuppressWarnings("serial")
 public class TokenMgrError extends Error
 {
 
@@ -137,4 +138,4 @@
       this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
    }
 }
-/* JavaCC - OriginalChecksum=55cddb2336a66b376c0bb59d916b326d (do not edit this line) */
+/* JavaCC - OriginalChecksum=1c94e13236c7e0121e49427992341ee3 (do not edit this line) */

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/CachingSpanFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/CachingSpanFilter.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/CachingSpanFilter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/CachingSpanFilter.java Tue Oct 20 19:58:18 2009
@@ -19,7 +19,7 @@
 import org.apache.lucene.index.IndexReader;
 
 import java.io.IOException;
-import java.util.BitSet;
+
 import java.util.Map;
 import java.util.WeakHashMap;
 
@@ -33,7 +33,7 @@
   /**
    * A transient Filter cache.
    */
-  protected transient Map cache;
+  protected transient Map<IndexReader,SpanFilterResult> cache;
 
   /**
    * @param filter Filter to cache results of
@@ -50,11 +50,11 @@
   private SpanFilterResult getCachedResult(IndexReader reader) throws IOException {
     SpanFilterResult result = null;
     if (cache == null) {
-      cache = new WeakHashMap();
+      cache = new WeakHashMap<IndexReader,SpanFilterResult>();
     }
 
     synchronized (cache) {  // check cache
-      result = (SpanFilterResult) cache.get(reader);
+      result = cache.get(reader);
       if (result == null) {
         result = filter.bitSpans(reader);
         cache.put(reader, result);

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/ConjunctionScorer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/ConjunctionScorer.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/ConjunctionScorer.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/ConjunctionScorer.java Tue Oct 20 19:58:18 2009
@@ -29,8 +29,8 @@
   private final float coord;
   private int lastDoc = -1;
 
-  public ConjunctionScorer(Similarity similarity, Collection scorers) throws IOException {
-    this(similarity, (Scorer[]) scorers.toArray(new Scorer[scorers.size()]));
+  public ConjunctionScorer(Similarity similarity, Collection<Scorer> scorers) throws IOException {
+    this(similarity, scorers.toArray(new Scorer[scorers.size()]));
   }
 
   public ConjunctionScorer(Similarity similarity, Scorer[] scorers) throws IOException {
@@ -52,9 +52,9 @@
     // it will already start off sorted (all scorers on same doc).
     
     // note that this comparator is not consistent with equals!
-    Arrays.sort(scorers, new Comparator() {         // sort the array
-      public int compare(Object o1, Object o2) {
-        return ((Scorer) o1).docID() - ((Scorer) o2).docID();
+    Arrays.sort(scorers, new Comparator<Scorer>() {         // sort the array
+      public int compare(Scorer o1, Scorer o2) {
+        return o1.docID() - o2.docID();
       }
     });
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/Explanation.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/Explanation.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/Explanation.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/Explanation.java Tue Oct 20 19:58:18 2009
@@ -24,7 +24,7 @@
 public class Explanation implements java.io.Serializable {
   private float value;                            // the value of this node
   private String description;                     // what it represents
-  private ArrayList details;                      // sub-explanations
+  private ArrayList<Explanation> details;                      // sub-explanations
 
   public Explanation() {}
 
@@ -71,13 +71,13 @@
   public Explanation[] getDetails() {
     if (details == null)
       return null;
-    return (Explanation[])details.toArray(new Explanation[0]);
+    return details.toArray(new Explanation[0]);
   }
 
   /** Adds a sub-node to this explanation node. */
   public void addDetail(Explanation detail) {
     if (details == null)
-      details = new ArrayList();
+      details = new ArrayList<Explanation>();
     details.add(detail);
   }
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheImpl.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheImpl.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheImpl.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheImpl.java Tue Oct 20 19:58:18 2009
@@ -21,13 +21,10 @@
 import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.WeakHashMap;
 
-import org.apache.lucene.document.NumericField; // javadoc
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Terms;
@@ -49,12 +46,12 @@
  */
 class FieldCacheImpl implements FieldCache {
 	
-  private Map caches;
+  private Map<Class<?>,Cache> caches;
   FieldCacheImpl() {
     init();
   }
   private synchronized void init() {
-    caches = new HashMap(7);
+    caches = new HashMap<Class<?>,Cache>(7);
     caches.put(Byte.TYPE, new ByteCache(this));
     caches.put(Short.TYPE, new ShortCache(this));
     caches.put(Integer.TYPE, new IntCache(this));
@@ -70,23 +67,18 @@
   }
   
   public CacheEntry[] getCacheEntries() {
-    List result = new ArrayList(17);
-    Iterator outerKeys = caches.keySet().iterator();
-    while (outerKeys.hasNext()) {
-      Class cacheType = (Class)outerKeys.next();
-      Cache cache = (Cache)caches.get(cacheType);
-      Iterator innerKeys = cache.readerCache.keySet().iterator();
-      while (innerKeys.hasNext()) {
+    List<CacheEntry> result = new ArrayList<CacheEntry>(17);
+    for(final Class<?> cacheType: caches.keySet()) {
+      Cache cache = caches.get(cacheType);
+      for (final Object readerKey : cache.readerCache.keySet()) {
         // we've now materialized a hard ref
-        Object readerKey = innerKeys.next();
+        
         // innerKeys was backed by WeakHashMap, sanity check
         // that it wasn't GCed before we made hard ref
         if (null != readerKey && cache.readerCache.containsKey(readerKey)) {
-          Map innerCache = ((Map)cache.readerCache.get(readerKey));
-          Iterator entrySetIterator = innerCache.entrySet().iterator();
-          while (entrySetIterator.hasNext()) {
-            Map.Entry mapEntry = (Map.Entry) entrySetIterator.next();
-            Entry entry = (Entry) mapEntry.getKey();
+          Map<Entry, Object> innerCache = cache.readerCache.get(readerKey);
+          for (final Map.Entry<Entry, Object> mapEntry : innerCache.entrySet()) {
+            Entry entry = mapEntry.getKey();
             result.add(new CacheEntryImpl(readerKey, entry.field,
                                           cacheType, entry.custom,
                                           mapEntry.getValue()));
@@ -94,17 +86,17 @@
         }
       }
     }
-    return (CacheEntry[]) result.toArray(new CacheEntry[result.size()]);
+    return result.toArray(new CacheEntry[result.size()]);
   }
   
   private static final class CacheEntryImpl extends CacheEntry {
     private final Object readerKey;
     private final String fieldName;
-    private final Class cacheType;
+    private final Class<?> cacheType;
     private final Object custom;
     private final Object value;
     CacheEntryImpl(Object readerKey, String fieldName,
-                   Class cacheType,
+                   Class<?> cacheType,
                    Object custom,
                    Object value) {
         this.readerKey = readerKey;
@@ -121,7 +113,7 @@
     }
     public Object getReaderKey() { return readerKey; }
     public String getFieldName() { return fieldName; }
-    public Class getCacheType() { return cacheType; }
+    public Class<?> getCacheType() { return cacheType; }
     public Object getCustom() { return custom; }
     public Object getValue() { return value; }
   }
@@ -146,19 +138,19 @@
 
     final FieldCache wrapper;
 
-    final Map readerCache = new WeakHashMap();
+    final Map<Object,Map<Entry,Object>> readerCache = new WeakHashMap<Object,Map<Entry,Object>>();
     
     protected abstract Object createValue(IndexReader reader, Entry key)
         throws IOException;
 
     public Object get(IndexReader reader, Entry key) throws IOException {
-      Map innerCache;
+      Map<Entry,Object> innerCache;
       Object value;
       final Object readerKey = reader.getFieldCacheKey();
       synchronized (readerCache) {
-        innerCache = (Map) readerCache.get(readerKey);
+        innerCache = readerCache.get(readerKey);
         if (innerCache == null) {
-          innerCache = new HashMap();
+          innerCache = new HashMap<Entry,Object>();
           readerCache.put(readerKey, innerCache);
           value = null;
         } else {
@@ -252,7 +244,7 @@
   // inherit javadocs
   public byte[] getBytes(IndexReader reader, String field, ByteParser parser)
       throws IOException {
-    return (byte[]) ((Cache)caches.get(Byte.TYPE)).get(reader, new Entry(field, parser));
+    return (byte[]) caches.get(Byte.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class ByteCache extends Cache {
@@ -261,7 +253,7 @@
     }
     protected Object createValue(IndexReader reader, Entry entryKey)
         throws IOException {
-      Entry entry = (Entry) entryKey;
+      Entry entry = entryKey;
       String field = entry.field;
       ByteParser parser = (ByteParser) entry.custom;
       if (parser == null) {
@@ -303,7 +295,7 @@
   // inherit javadocs
   public short[] getShorts(IndexReader reader, String field, ShortParser parser)
       throws IOException {
-    return (short[]) ((Cache)caches.get(Short.TYPE)).get(reader, new Entry(field, parser));
+    return (short[]) caches.get(Short.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class ShortCache extends Cache {
@@ -313,7 +305,7 @@
 
     protected Object createValue(IndexReader reader, Entry entryKey)
         throws IOException {
-      Entry entry = (Entry) entryKey;
+      Entry entry = entryKey;
       String field = entry.field;
       ShortParser parser = (ShortParser) entry.custom;
       if (parser == null) {
@@ -355,7 +347,7 @@
   // inherit javadocs
   public int[] getInts(IndexReader reader, String field, IntParser parser)
       throws IOException {
-    return (int[]) ((Cache)caches.get(Integer.TYPE)).get(reader, new Entry(field, parser));
+    return (int[]) caches.get(Integer.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class IntCache extends Cache {
@@ -365,7 +357,7 @@
 
     protected Object createValue(IndexReader reader, Entry entryKey)
         throws IOException {
-      Entry entry = (Entry) entryKey;
+      Entry entry = entryKey;
       String field = entry.field;
       IntParser parser = (IntParser) entry.custom;
       if (parser == null) {
@@ -425,7 +417,7 @@
   public float[] getFloats(IndexReader reader, String field, FloatParser parser)
     throws IOException {
 
-    return (float[]) ((Cache)caches.get(Float.TYPE)).get(reader, new Entry(field, parser));
+    return (float[]) caches.get(Float.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class FloatCache extends Cache {
@@ -435,7 +427,7 @@
 
     protected Object createValue(IndexReader reader, Entry entryKey)
         throws IOException {
-      Entry entry = (Entry) entryKey;
+      Entry entry = entryKey;
       String field = entry.field;
       FloatParser parser = (FloatParser) entry.custom;
       if (parser == null) {
@@ -492,7 +484,7 @@
   // inherit javadocs
   public long[] getLongs(IndexReader reader, String field, FieldCache.LongParser parser)
       throws IOException {
-    return (long[]) ((Cache)caches.get(Long.TYPE)).get(reader, new Entry(field, parser));
+    return (long[]) caches.get(Long.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class LongCache extends Cache {
@@ -559,7 +551,7 @@
   // inherit javadocs
   public double[] getDoubles(IndexReader reader, String field, FieldCache.DoubleParser parser)
       throws IOException {
-    return (double[]) ((Cache)caches.get(Double.TYPE)).get(reader, new Entry(field, parser));
+    return (double[]) caches.get(Double.TYPE).get(reader, new Entry(field, parser));
   }
 
   static final class DoubleCache extends Cache {
@@ -569,7 +561,7 @@
 
     protected Object createValue(IndexReader reader, Entry entryKey)
         throws IOException {
-      Entry entry = (Entry) entryKey;
+      Entry entry = entryKey;
       String field = entry.field;
       FieldCache.DoubleParser parser = (FieldCache.DoubleParser) entry.custom;
       if (parser == null) {
@@ -617,7 +609,7 @@
   // inherit javadocs
   public String[] getStrings(IndexReader reader, String field)
       throws IOException {
-    return (String[]) ((Cache)caches.get(String.class)).get(reader, new Entry(field, (Parser)null));
+    return (String[]) caches.get(String.class).get(reader, new Entry(field, (Parser)null));
   }
 
   static final class StringCache extends Cache {
@@ -657,7 +649,7 @@
   // inherit javadocs
   public StringIndex getStringIndex(IndexReader reader, String field)
       throws IOException {
-    return (StringIndex) ((Cache)caches.get(StringIndex.class)).get(reader, new Entry(field, (Parser)null));
+    return (StringIndex) caches.get(StringIndex.class).get(reader, new Entry(field, (Parser)null));
   }
 
   static final class StringIndexCache extends Cache {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java Tue Oct 20 19:58:18 2009
@@ -26,20 +26,20 @@
 /**
  * A range filter built on top of a cached single term field (in {@link FieldCache}).
  * 
- * <p>FieldCacheRangeFilter builds a single cache for the field the first time it is used.
- * Each subsequent FieldCacheRangeFilter on the same field then reuses this cache,
+ * <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
+ * Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,
  * even if the range itself changes. 
  * 
- * <p>This means that FieldCacheRangeFilter is much faster (sometimes more than 100x as fast) 
- * as building a {@link TermRangeFilter} (or {@link ConstantScoreRangeQuery} on a {@link TermRangeFilter})
- * for each query, if using a {@link #newStringRange}. However, if the range never changes it
- * is slower (around 2x as slow) than building a CachingWrapperFilter on top of a single TermRangeFilter.
+ * <p>This means that {@code FieldCacheRangeFilter} is much faster (sometimes more than 100x as fast) 
+ * as building a {@link TermRangeFilter}, if using a {@link #newStringRange}.
+ * However, if the range never changes it is slower (around 2x as slow) than building
+ * a CachingWrapperFilter on top of a single {@link TermRangeFilter}.
  *
  * For numeric data types, this filter may be significantly faster than {@link NumericRangeFilter}.
 * Furthermore, it does not need the numeric values encoded by {@link NumericField}. But
 * it has the problem that it only works with exactly one value per document (see below).
  *
- * <p>As with all {@link FieldCache} based functionality, FieldCacheRangeFilter is only valid for 
+ * <p>As with all {@link FieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for 
 * fields which have exactly one term for each document (except for {@link #newStringRange}
 * where 0 terms are also allowed). Due to a restriction of {@link FieldCache}, for numeric ranges
 * all terms that do not have a numeric value are assumed to be 0.
@@ -52,15 +52,15 @@
  * that create a correct instance for different data types supported by {@link FieldCache}.
  */
 
-public abstract class FieldCacheRangeFilter extends Filter {
+public abstract class FieldCacheRangeFilter<T> extends Filter {
   final String field;
   final FieldCache.Parser parser;
-  final Object lowerVal;
-  final Object upperVal;
+  final T lowerVal;
+  final T upperVal;
   final boolean includeLower;
   final boolean includeUpper;
   
-  private FieldCacheRangeFilter(String field, FieldCache.Parser parser, Object lowerVal, Object upperVal, boolean includeLower, boolean includeUpper) {
+  private FieldCacheRangeFilter(String field, FieldCache.Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
     this.field = field;
     this.parser = parser;
     this.lowerVal = lowerVal;
@@ -73,16 +73,16 @@
   public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException;
 
   /**
-   * Creates a string range query using {@link FieldCache#getStringIndex}. This works with all
+   * Creates a string range filter using {@link FieldCache#getStringIndex}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         final FieldCache.StringIndex fcsi = FieldCache.DEFAULT.getStringIndex(reader, field);
-        final int lowerPoint = fcsi.binarySearchLookup((String) lowerVal);
-        final int upperPoint = fcsi.binarySearchLookup((String) upperVal);
+        final int lowerPoint = fcsi.binarySearchLookup(lowerVal);
+        final int upperPoint = fcsi.binarySearchLookup(upperVal);
         
         final int inclusiveLowerPoint, inclusiveUpperPoint;
 
@@ -129,25 +129,25 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getBytes(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String)}. This works with all
    * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newByteRange(String field, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Byte> newByteRange(String field, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
     return newByteRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
    * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         final byte inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          final byte i = ((Number) lowerVal).byteValue();
+          final byte i = lowerVal.byteValue();
           if (!includeLower && i == Byte.MAX_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveLowerPoint = (byte) (includeLower ?  i : (i + 1));
@@ -155,7 +155,7 @@
           inclusiveLowerPoint = Byte.MIN_VALUE;
         }
         if (upperVal != null) {
-          final byte i = ((Number) upperVal).byteValue();
+          final byte i = upperVal.byteValue();
           if (!includeUpper && i == Byte.MIN_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveUpperPoint = (byte) (includeUpper ? i : (i - 1));
@@ -178,25 +178,25 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getShorts(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String)}. This works with all
    * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newShortRange(String field, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Short> newShortRange(String field, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
     return newShortRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
    * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         final short inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          short i = ((Number) lowerVal).shortValue();
+          short i = lowerVal.shortValue();
           if (!includeLower && i == Short.MAX_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveLowerPoint = (short) (includeLower ? i : (i + 1));
@@ -204,7 +204,7 @@
           inclusiveLowerPoint = Short.MIN_VALUE;
         }
         if (upperVal != null) {
-          short i = ((Number) upperVal).shortValue();
+          short i = upperVal.shortValue();
           if (!includeUpper && i == Short.MIN_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveUpperPoint = (short) (includeUpper ? i : (i - 1));
@@ -227,25 +227,25 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getInts(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String)}. This works with all
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
     return newIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser)}. This works with all
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         final int inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          int i = ((Number) lowerVal).intValue();
+          int i = lowerVal.intValue();
           if (!includeLower && i == Integer.MAX_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveLowerPoint = includeLower ? i : (i + 1);
@@ -253,7 +253,7 @@
           inclusiveLowerPoint = Integer.MIN_VALUE;
         }
         if (upperVal != null) {
-          int i = ((Number) upperVal).intValue();
+          int i = upperVal.intValue();
           if (!includeUpper && i == Integer.MIN_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveUpperPoint = includeUpper ? i : (i - 1);
@@ -276,25 +276,25 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getLongs(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String)}. This works with all
    * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
     return newLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
    * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         final long inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          long i = ((Number) lowerVal).longValue();
+          long i = lowerVal.longValue();
           if (!includeLower && i == Long.MAX_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveLowerPoint = includeLower ? i : (i + 1L);
@@ -302,7 +302,7 @@
           inclusiveLowerPoint = Long.MIN_VALUE;
         }
         if (upperVal != null) {
-          long i = ((Number) upperVal).longValue();
+          long i = upperVal.longValue();
           if (!includeUpper && i == Long.MIN_VALUE)
             return DocIdSet.EMPTY_DOCIDSET;
           inclusiveUpperPoint = includeUpper ? i : (i - 1L);
@@ -325,27 +325,27 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getFloats(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String)}. This works with all
    * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
     return newFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
    * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         // we transform the floating point numbers to sortable integers
         // using NumericUtils to easier find the next bigger/lower value
         final float inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          float f = ((Number) lowerVal).floatValue();
+          float f = lowerVal.floatValue();
           if (!includeUpper && f > 0.0f && Float.isInfinite(f))
             return DocIdSet.EMPTY_DOCIDSET;
           int i = NumericUtils.floatToSortableInt(f);
@@ -354,7 +354,7 @@
           inclusiveLowerPoint = Float.NEGATIVE_INFINITY;
         }
         if (upperVal != null) {
-          float f = ((Number) upperVal).floatValue();
+          float f = upperVal.floatValue();
           if (!includeUpper && f < 0.0f && Float.isInfinite(f))
             return DocIdSet.EMPTY_DOCIDSET;
           int i = NumericUtils.floatToSortableInt(f);
@@ -378,27 +378,27 @@
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getDoubles(IndexReader,String)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String)}. This works with all
    * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
+  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
     return newDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
   }
   
   /**
-   * Creates a numeric range query using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
+   * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
    * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
-  public static FieldCacheRangeFilter newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
+    return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
       public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
         // we transform the floating point numbers to sortable integers
         // using NumericUtils to easier find the next bigger/lower value
         final double inclusiveLowerPoint, inclusiveUpperPoint;
         if (lowerVal != null) {
-          double f = ((Number) lowerVal).doubleValue();
+          double f = lowerVal.doubleValue();
           if (!includeUpper && f > 0.0 && Double.isInfinite(f))
             return DocIdSet.EMPTY_DOCIDSET;
           long i = NumericUtils.doubleToSortableLong(f);
@@ -407,7 +407,7 @@
           inclusiveLowerPoint = Double.NEGATIVE_INFINITY;
         }
         if (upperVal != null) {
-          double f = ((Number) upperVal).doubleValue();
+          double f = upperVal.doubleValue();
           if (!includeUpper && f < 0.0 && Double.isInfinite(f))
             return DocIdSet.EMPTY_DOCIDSET;
           long i = NumericUtils.doubleToSortableLong(f);
@@ -464,6 +464,24 @@
     h ^= (includeLower ? 1549299360 : -365038026) ^ (includeUpper ? 1721088258 : 1948649653);
     return h;
   }
+
+  /** Returns the field name for this filter */
+  public String getField() { return field; }
+
+  /** Returns <code>true</code> if the lower endpoint is inclusive */
+  public boolean includesLower() { return includeLower; }
+  
+  /** Returns <code>true</code> if the upper endpoint is inclusive */
+  public boolean includesUpper() { return includeUpper; }
+
+  /** Returns the lower value of this range filter */
+  public T getLowerVal() { return lowerVal; }
+
+  /** Returns the upper value of this range filter */
+  public T getUpperVal() { return upperVal; }
+  
+  /** Returns the current numeric parser ({@code null} when {@code T} is {@code String}) */
+  public FieldCache.Parser getParser() { return parser; }
   
   static abstract class FieldCacheDocIdSet extends DocIdSet {
     private final IndexReader reader;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldValueHitQueue.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldValueHitQueue.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldValueHitQueue.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FieldValueHitQueue.java Tue Oct 20 19:58:18 2009
@@ -34,7 +34,7 @@
  * @see Searcher#search(Query,Filter,int,Sort)
  * @see FieldCache
  */
-public abstract class FieldValueHitQueue extends PriorityQueue {
+public abstract class FieldValueHitQueue extends PriorityQueue<FieldValueHitQueue.Entry> {
 
   final static class Entry {
     int slot;
@@ -84,9 +84,7 @@
      * @param b ScoreDoc
      * @return <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
      */
-    protected boolean lessThan(final Object a, final Object b) {
-      final Entry hitA = (Entry) a;
-      final Entry hitB = (Entry) b;
+    protected boolean lessThan(final Entry hitA, final Entry hitB) {
 
       assert hitA != hitB;
       assert hitA.slot != hitB.slot;
@@ -123,9 +121,7 @@
       initialize(size);
     }
   
-    protected boolean lessThan(final Object a, final Object b) {
-      final Entry hitA = (Entry) a;
-      final Entry hitB = (Entry) b;
+    protected boolean lessThan(final Entry hitA, final Entry hitB) {
 
       assert hitA != hitB;
       assert hitA.slot != hitB.slot;
@@ -194,7 +190,7 @@
   protected final FieldComparator[] comparators;
   protected final int[] reverseMul;
 
-  protected abstract boolean lessThan (final Object a, final Object b);
+  protected abstract boolean lessThan (final Entry a, final Entry b);
 
   /**
    * Given a queue Entry, creates a corresponding FieldDoc

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FilterManager.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FilterManager.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FilterManager.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/FilterManager.java Tue Oct 20 19:58:18 2009
@@ -46,7 +46,7 @@
   protected static final long DEFAULT_CACHE_SLEEP_TIME = 1000 * 60 * 10;
 
   /** The cache itself */
-  protected Map           cache;
+  protected Map<Integer,FilterItem>           cache;
   /** Maximum allowed cache size */
   protected int           cacheCleanSize;
   /** Cache cleaning frequency */
@@ -65,7 +65,7 @@
    * Sets up the FilterManager singleton.
    */
   protected FilterManager() {
-    cache            = new HashMap();
+    cache            = new HashMap<Integer,FilterItem>();
     cacheCleanSize   = DEFAULT_CACHE_CLEAN_SIZE; // Let the cache get to 100 items
     cleanSleepTime   = DEFAULT_CACHE_SLEEP_TIME; // 10 minutes between cleanings
 
@@ -103,7 +103,7 @@
   public Filter getFilter(Filter filter) {
     synchronized(cache) {
       FilterItem fi = null;
-      fi = (FilterItem)cache.get(Integer.valueOf(filter.hashCode()));
+      fi = cache.get(Integer.valueOf(filter.hashCode()));
       if (fi != null) {
         fi.timestamp = new Date().getTime();
         return fi.filter;
@@ -146,14 +146,13 @@
   protected class FilterCleaner implements Runnable  {
 
     private boolean running = true;
-    private TreeSet sortedFilterItems;
+    private TreeSet<Map.Entry<Integer,FilterItem>> sortedFilterItems;
 
     public FilterCleaner() {
-      sortedFilterItems = new TreeSet(new Comparator() {
-        public int compare(Object a, Object b) {
-          if( a instanceof Map.Entry && b instanceof Map.Entry) {
-            FilterItem fia = (FilterItem) ((Map.Entry)a).getValue();
-            FilterItem fib = (FilterItem) ((Map.Entry)b).getValue();
+      sortedFilterItems = new TreeSet<Map.Entry<Integer,FilterItem>>(new Comparator<Map.Entry<Integer,FilterItem>>() {
+        public int compare(Map.Entry<Integer,FilterItem> a, Map.Entry<Integer,FilterItem> b) {
+            FilterItem fia = a.getValue();
+            FilterItem fib = b.getValue();
             if ( fia.timestamp == fib.timestamp ) {
               return 0;
             }
@@ -163,9 +162,7 @@
             }
             // larger timestamp last
             return 1;
-          } else {
-            throw new ClassCastException("Objects are not Map.Entry");
-          }
+          
         }
       });
     }
@@ -180,12 +177,12 @@
           sortedFilterItems.clear();
           synchronized (cache) {
             sortedFilterItems.addAll(cache.entrySet());
-            Iterator it = sortedFilterItems.iterator();
+            Iterator<Map.Entry<Integer,FilterItem>> it = sortedFilterItems.iterator();
             int numToDelete = (int) ((cache.size() - cacheCleanSize) * 1.5);
             int counter = 0;
             // loop over the set and delete all of the cache entries not used in a while
             while (it.hasNext() && counter++ < numToDelete) {
-              Map.Entry entry = (Map.Entry)it.next();
+              Map.Entry<Integer,FilterItem> entry = it.next();
               cache.remove(entry.getKey());
             }
           }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/IndexSearcher.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/IndexSearcher.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/IndexSearcher.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/IndexSearcher.java Tue Oct 20 19:58:18 2009
@@ -90,9 +90,9 @@
     reader = r;
     this.closeReader = closeReader;
 
-    List subReadersList = new ArrayList();
+    List<IndexReader> subReadersList = new ArrayList<IndexReader>();
     gatherSubReaders(subReadersList, reader);
-    subReaders = (IndexReader[]) subReadersList.toArray(new IndexReader[subReadersList.size()]);
+    subReaders = subReadersList.toArray(new IndexReader[subReadersList.size()]);
     docStarts = new int[subReaders.length];
     int maxDoc = 0;
     for (int i = 0; i < subReaders.length; i++) {
@@ -101,7 +101,7 @@
     }
   }
 
-  protected void gatherSubReaders(List allSubReaders, IndexReader r) {
+  protected void gatherSubReaders(List<IndexReader> allSubReaders, IndexReader r) {
     ReaderUtil.gatherSubReaders(allSubReaders, r);
   }
 

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiPhraseQuery.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiPhraseQuery.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiPhraseQuery.java Tue Oct 20 19:58:18 2009
@@ -41,8 +41,8 @@
  */
 public class MultiPhraseQuery extends Query {
   private String field;
-  private ArrayList termArrays = new ArrayList();
-  private ArrayList positions = new ArrayList();
+  private ArrayList<Term[]> termArrays = new ArrayList<Term[]>();
+  private ArrayList<Integer> positions = new ArrayList<Integer>();
 
   private int slop = 0;
 
@@ -98,10 +98,10 @@
   }
 
   /**
-   * Returns a List<Term[]> of the terms in the multiphrase.
+   * Returns a List of the terms in the multiphrase.
    * Do not modify the List or its contents.
    */
-  public List getTermArrays() {
+  public List<Term[]> getTermArrays() {
 	  return Collections.unmodifiableList(termArrays);
   }
 
@@ -117,10 +117,9 @@
 
   // inherit javadoc
   public void extractTerms(Set terms) {
-    for (Iterator iter = termArrays.iterator(); iter.hasNext();) {
-      Term[] arr = (Term[])iter.next();
-      for (int i=0; i<arr.length; i++) {
-        terms.add(arr[i]);
+    for (final Term[] arr : termArrays) {
+      for (final Term term: arr) {
+        terms.add(term);
       }
     }
   }
@@ -138,11 +137,9 @@
       this.similarity = getSimilarity(searcher);
 
       // compute idf
-      Iterator i = termArrays.iterator();
-      while (i.hasNext()) {
-        Term[] terms = (Term[])i.next();
-        for (int j=0; j<terms.length; j++) {
-          idf += getSimilarity(searcher).idf(terms[j], searcher);
+      for(final Term[] terms: termArrays) {
+        for (Term term: terms) {
+          idf += getSimilarity(searcher).idf(term, searcher);
         }
       }
     }
@@ -285,9 +282,9 @@
     }
 
     buffer.append("\"");
-    Iterator i = termArrays.iterator();
+    Iterator<Term[]> i = termArrays.iterator();
     while (i.hasNext()) {
-      Term[] terms = (Term[])i.next();
+      Term[] terms = i.next();
       if (terms.length > 1) {
         buffer.append("(");
         for (int j = 0; j < terms.length; j++) {
@@ -337,9 +334,7 @@
   // Breakout calculation of the termArrays hashcode
   private int termArraysHashCode() {
     int hashCode = 1;
-    Iterator iterator = termArrays.iterator();
-    while (iterator.hasNext()) {
-      Term[] termArray = (Term[]) iterator.next();
+    for (final Term[] termArray: termArrays) {
       hashCode = 31 * hashCode
           + (termArray == null ? 0 : arraysHashCode(termArray));
     }
@@ -361,15 +356,15 @@
   }
 
   // Breakout calculation of the termArrays equals
-  private boolean termArraysEquals(List termArrays1, List termArrays2) {
+  private boolean termArraysEquals(List<Term[]> termArrays1, List<Term[]> termArrays2) {
     if (termArrays1.size() != termArrays2.size()) {
       return false;
     }
-    ListIterator iterator1 = termArrays1.listIterator();
-    ListIterator iterator2 = termArrays2.listIterator();
+    ListIterator<Term[]> iterator1 = termArrays1.listIterator();
+    ListIterator<Term[]> iterator2 = termArrays2.listIterator();
     while (iterator1.hasNext()) {
-      Term[] termArray1 = (Term[]) iterator1.next();
-      Term[] termArray2 = (Term[]) iterator2.next();
+      Term[] termArray1 = iterator1.next();
+      Term[] termArray2 = iterator2.next();
       if (!(termArray1 == null ? termArray2 == null : Arrays.equals(termArray1,
           termArray2))) {
         return false;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiSearcher.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiSearcher.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiSearcher.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiSearcher.java Tue Oct 20 19:58:18 2009
@@ -43,10 +43,10 @@
    * initialize Weights.
    */
   private static class CachedDfSource extends Searcher {
-    private Map dfMap; // Map from Terms to corresponding doc freqs
+    private Map<Term,Integer> dfMap; // Map from Terms to corresponding doc freqs
     private int maxDoc; // document count
 
-    public CachedDfSource(Map dfMap, int maxDoc, Similarity similarity) {
+    public CachedDfSource(Map<Term,Integer> dfMap, int maxDoc, Similarity similarity) {
       this.dfMap = dfMap;
       this.maxDoc = maxDoc;
       setSimilarity(similarity);
@@ -55,7 +55,7 @@
     public int docFreq(Term term) {
       int df;
       try {
-        df = ((Integer) dfMap.get(term)).intValue();
+        df = dfMap.get(term).intValue();
       } catch (NullPointerException e) {
         throw new IllegalArgumentException("df for term " + term.text()
             + " not available");
@@ -305,7 +305,7 @@
     Query rewrittenQuery = rewrite(original);
 
     // step 2
-    Set terms = new HashSet();
+    Set<Term> terms = new HashSet<Term>();
     rewrittenQuery.extractTerms(terms);
 
     // step3
@@ -319,7 +319,7 @@
       }
     }
 
-    HashMap dfMap = new HashMap();
+    HashMap<Term,Integer> dfMap = new HashMap<Term,Integer>();
     for(int i=0; i<allTermsArray.length; i++) {
       dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDfs[i]));
     }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQuery.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQuery.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQuery.java Tue Oct 20 19:58:18 2009
@@ -21,13 +21,11 @@
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Iterator;
+
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermRef;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.queryParser.QueryParser; // for javadoc
 
 /**
@@ -70,7 +68,7 @@
 
   private static final class ConstantScoreFilterRewrite extends RewriteMethod implements Serializable {
     public Query rewrite(IndexReader reader, MultiTermQuery query) {
-      Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));
+      Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter<MultiTermQuery>(query));
       result.setBoost(query.getBoost());
       return result;
     }
@@ -245,7 +243,8 @@
       // exhaust the enum before hitting either of the
       // cutoffs, we use ConstantBooleanQueryRewrite; else,
       // ConstantFilterRewrite:
-      final Collection pendingTerms = new ArrayList();
+      final Collection<TermRef> pendingTerms = new ArrayList<TermRef>();
+      final Collection<Term> oldApiPendingTerms = new ArrayList<Term>();
       final int docCountCutoff = (int) ((docCountPercent / 100.) * reader.maxDoc());
       final int termCountLimit = Math.min(BooleanQuery.getMaxClauseCount(), termCountCutoff);
       int docVisitCount = 0;
@@ -259,7 +258,7 @@
           // first term must exist since termsEnum wasn't null
           assert term != null;
           do {
-            pendingTerms.add(term.clone());
+            pendingTerms.add((TermRef) term.clone());
             if (pendingTerms.size() >= termCountLimit || docVisitCount >= docCountCutoff) {
               // Too many terms -- cut our losses now and make a filter.
               Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));
@@ -277,10 +276,9 @@
           // Enumeration is done, and we hit a small
           // enough number of terms & docs -- just make a
           // BooleanQuery, now
-          Iterator it = pendingTerms.iterator();
           BooleanQuery bq = new BooleanQuery(true);
-          while(it.hasNext()) {
-            TermQuery tq = new TermQuery(new Term(field, ((TermRef) it.next()).toString()));
+          for(TermRef termRef : pendingTerms) {
+            TermQuery tq = new TermQuery(new Term(field, termRef.toString()));
             bq.add(tq, BooleanClause.Occur.SHOULD);
           }
           // Strip scores
@@ -300,7 +298,7 @@
           while(true) {
             Term t = enumerator.term();
             if (t != null) {
-              pendingTerms.add(t);
+              oldApiPendingTerms.add(t);
               // Loading the TermInfo from the terms dict here
               // should not be costly, because 1) the
               // query/filter will load the TermInfo when it
@@ -317,11 +315,10 @@
               // Enumeration is done, and we hit a small
               // enough number of terms & docs -- just make a
               // BooleanQuery, now
-              Iterator it = pendingTerms.iterator();
               BooleanQuery bq = new BooleanQuery(true);
-              while(it.hasNext()) {
-                TermQuery tq = new TermQuery((Term) it.next());
-                bq.add(tq, BooleanClause.Occur.SHOULD);
+           	  for (final Term term: oldApiPendingTerms) {
+              	TermQuery tq = new TermQuery(term);
+              	bq.add(tq, BooleanClause.Occur.SHOULD);
               }
               // Strip scores
               Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
@@ -336,11 +333,13 @@
       }
     }
     
+    @Override
     public int hashCode() {
       final int prime = 1279;
       return (int) (prime * termCountCutoff + Double.doubleToLongBits(docCountPercent));
     }
 
+    @Override
     public boolean equals(Object obj) {
       if (this == obj)
         return true;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java Tue Oct 20 19:58:18 2009
@@ -43,14 +43,14 @@
  * {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE};
  * this is why it is not abstract.
  */
-public class MultiTermQueryWrapperFilter extends Filter {
+public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filter {
     
-  protected final MultiTermQuery query;
+  protected final Q query;
 
   /**
    * Wrap a {@link MultiTermQuery} as a Filter.
    */
-  protected MultiTermQueryWrapperFilter(MultiTermQuery query) {
+  protected MultiTermQueryWrapperFilter(Q query) {
       this.query = query;
   }
   

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/NumericRangeFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/NumericRangeFilter.java?rev=827772&r1=827771&r2=827772&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/NumericRangeFilter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/NumericRangeFilter.java Tue Oct 20 19:58:18 2009
@@ -45,7 +45,7 @@
  *
  * @since 2.9
  **/
-public final class NumericRangeFilter<T extends Number> extends MultiTermQueryWrapperFilter {
+public final class NumericRangeFilter<T extends Number> extends MultiTermQueryWrapperFilter<NumericRangeQuery<T>> {
 
   private NumericRangeFilter(final NumericRangeQuery<T> query) {
     super(query);
@@ -172,23 +172,18 @@
   }
   
   /** Returns the field name for this filter */
-  @SuppressWarnings("unchecked")
-  public String getField() { return ((NumericRangeQuery<T>)query).getField(); }
+  public String getField() { return query.getField(); }
 
   /** Returns <code>true</code> if the lower endpoint is inclusive */
-  @SuppressWarnings("unchecked")
-  public boolean includesMin() { return ((NumericRangeQuery<T>)query).includesMin(); }
+  public boolean includesMin() { return query.includesMin(); }
   
   /** Returns <code>true</code> if the upper endpoint is inclusive */
-  @SuppressWarnings("unchecked")
-  public boolean includesMax() { return ((NumericRangeQuery<T>)query).includesMax(); }
+  public boolean includesMax() { return query.includesMax(); }
 
   /** Returns the lower value of this range filter */
-  @SuppressWarnings("unchecked")
-  public T getMin() { return ((NumericRangeQuery<T>)query).getMin(); }
+  public T getMin() { return query.getMin(); }
 
   /** Returns the upper value of this range filter */
-  @SuppressWarnings("unchecked")
-  public T getMax() { return ((NumericRangeQuery<T>)query).getMax(); }
+  public T getMax() { return query.getMax(); }
   
 }



Mime
View raw message