lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From markrmil...@apache.org
Subject svn commit: r829454 [5/8] - in /lucene/java/branches/flex_1458: ./ contrib/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/ contrib/analyzers/common/src/java/org/apache/l...
Date Sat, 24 Oct 2009 21:23:37 GMT
Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java Sat Oct 24 21:23:15 2009
@@ -35,87 +35,27 @@
  * compatibility when creating StandardAnalyzer:
  * <ul>
  *   <li> As of 2.9, StopFilter preserves position
- *        increments by default
- *   <li> As of 2.9, Tokens incorrectly identified as acronyms
+ *        increments
+ *   <li> As of 2.4, Tokens incorrectly identified as acronyms
 *        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)
  * </ul>
  */
 public class StandardAnalyzer extends Analyzer {
-  private Set stopSet;
+  private Set<?> stopSet;
 
   /**
    * Specifies whether deprecated acronyms should be replaced with HOST type.
-   * This is false by default to support backward compatibility.
-   * 
-   * @deprecated this should be removed in the next release (3.0).
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
+   * See <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>
    */
-  private boolean replaceInvalidAcronym = defaultReplaceInvalidAcronym;
+  private final boolean replaceInvalidAcronym,enableStopPositionIncrements;
 
-  private static boolean defaultReplaceInvalidAcronym;
-  private boolean enableStopPositionIncrements;
-
-  // @deprecated
-  private boolean useDefaultStopPositionIncrements;
-
-  // Default to true (fixed the bug), unless the system prop is set
-  static {
-    final String v = System.getProperty("org.apache.lucene.analysis.standard.StandardAnalyzer.replaceInvalidAcronym");
-    if (v == null || v.equals("true"))
-      defaultReplaceInvalidAcronym = true;
-    else
-      defaultReplaceInvalidAcronym = false;
-  }
-
-  /**
-   *
-   * @return true if new instances of StandardTokenizer will
-   * replace mischaracterized acronyms
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   * @deprecated This will be removed (hardwired to true) in 3.0
-   */
-  public static boolean getDefaultReplaceInvalidAcronym() {
-    return defaultReplaceInvalidAcronym;
-  }
-
-  /**
-   *
-   * @param replaceInvalidAcronym Set to true to have new
-   * instances of StandardTokenizer replace mischaracterized
-   * acronyms by default.  Set to false to preserve the
-   * previous (before 2.4) buggy behavior.  Alternatively,
-   * set the system property
-   * org.apache.lucene.analysis.standard.StandardAnalyzer.replaceInvalidAcronym
-   * to false.
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   * @deprecated This will be removed (hardwired to true) in 3.0
-   */
-  public static void setDefaultReplaceInvalidAcronym(boolean replaceInvalidAcronym) {
-    defaultReplaceInvalidAcronym = replaceInvalidAcronym;
-  }
-
-
-  /** An array containing some common English words that are usually not
-  useful for searching. 
-  @deprecated Use {@link #STOP_WORDS_SET} instead */
-  public static final String[] STOP_WORDS = StopAnalyzer.ENGLISH_STOP_WORDS;
-  
   /** An unmodifiable set containing some common English words that are usually not
   useful for searching. */
-  public static final Set/*<String>*/ STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET; 
+  public static final Set<?> STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET; 
+  private final Version matchVersion;
 
   /** Builds an analyzer with the default stop words ({@link
    * #STOP_WORDS_SET}).
-   * @deprecated Use {@link #StandardAnalyzer(Version)} instead. */
-  public StandardAnalyzer() {
-    this(Version.LUCENE_24, STOP_WORDS_SET);
-  }
-
-  /** Builds an analyzer with the default stop words ({@link
-   * #STOP_WORDS}).
    * @param matchVersion Lucene version to match See {@link
    * <a href="#version">above</a>}
    */
@@ -124,34 +64,15 @@
   }
 
   /** Builds an analyzer with the given stop words.
-   * @deprecated Use {@link #StandardAnalyzer(Version, Set)}
-   * instead */
-  public StandardAnalyzer(Set stopWords) {
-    this(Version.LUCENE_24, stopWords);
-  }
-
-  /** Builds an analyzer with the given stop words.
    * @param matchVersion Lucene version to match See {@link
    * <a href="#version">above</a>}
    * @param stopWords stop words */
-  public StandardAnalyzer(Version matchVersion, Set stopWords) {
+  public StandardAnalyzer(Version matchVersion, Set<?> stopWords) {
     stopSet = stopWords;
-    init(matchVersion);
-  }
-
-  /** Builds an analyzer with the given stop words.
-   * @deprecated Use {@link #StandardAnalyzer(Version, Set)} instead */
-  public StandardAnalyzer(String[] stopWords) {
-    this(Version.LUCENE_24, StopFilter.makeStopSet(stopWords));
-  }
-
-  /** Builds an analyzer with the stop words from the given file.
-   * @see WordlistLoader#getWordSet(File)
-   * @deprecated Use {@link #StandardAnalyzer(Version, File)}
-   * instead
-   */
-  public StandardAnalyzer(File stopwords) throws IOException {
-    this(Version.LUCENE_24, stopwords);
+    setOverridesTokenStreamMethod(StandardAnalyzer.class);
+    enableStopPositionIncrements = matchVersion.onOrAfter(Version.LUCENE_29);
+    replaceInvalidAcronym = matchVersion.onOrAfter(Version.LUCENE_24);
+    this.matchVersion = matchVersion;
   }
 
   /** Builds an analyzer with the stop words from the given file.
@@ -160,17 +81,7 @@
    * <a href="#version">above</a>}
    * @param stopwords File to read stop words from */
   public StandardAnalyzer(Version matchVersion, File stopwords) throws IOException {
-    stopSet = WordlistLoader.getWordSet(stopwords);
-    init(matchVersion);
-  }
-
-  /** Builds an analyzer with the stop words from the given reader.
-   * @see WordlistLoader#getWordSet(Reader)
-   * @deprecated Use {@link #StandardAnalyzer(Version, Reader)}
-   * instead
-   */
-  public StandardAnalyzer(Reader stopwords) throws IOException {
-    this(Version.LUCENE_24, stopwords);
+    this(matchVersion, WordlistLoader.getWordSet(stopwords));
   }
 
   /** Builds an analyzer with the stop words from the given reader.
@@ -179,98 +90,18 @@
    * <a href="#version">above</a>}
    * @param stopwords Reader to read stop words from */
   public StandardAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
-    stopSet = WordlistLoader.getWordSet(stopwords);
-    init(matchVersion);
-  }
-
-  /**
-   *
-   * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   *
-   * @deprecated Remove in 3.X and make true the only valid value
-   */
-  public StandardAnalyzer(boolean replaceInvalidAcronym) {
-    this(Version.LUCENE_24, STOP_WORDS_SET);
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-    useDefaultStopPositionIncrements = true;
-  }
-
-  /**
-   *  @param stopwords The stopwords to use
-   * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   *
-   * @deprecated Remove in 3.X and make true the only valid value
-   */
-  public StandardAnalyzer(Reader stopwords, boolean replaceInvalidAcronym) throws IOException{
-    this(Version.LUCENE_24, stopwords);
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-  }
-
-  /**
-   * @param stopwords The stopwords to use
-   * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   *
-   * @deprecated Remove in 3.X and make true the only valid value
-   */
-  public StandardAnalyzer(File stopwords, boolean replaceInvalidAcronym) throws IOException{
-    this(Version.LUCENE_24, stopwords);
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-  }
-
-  /**
-   *
-   * @param stopwords The stopwords to use
-   * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   *
-   * @deprecated Remove in 3.X and make true the only valid value
-   */
-  public StandardAnalyzer(String [] stopwords, boolean replaceInvalidAcronym) throws IOException{
-    this(Version.LUCENE_24, StopFilter.makeStopSet(stopwords));
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-  }
-
-  /**
-   * @param stopwords The stopwords to use
-   * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   *
-   * @deprecated Remove in 3.X and make true the only valid value
-   */
-  public StandardAnalyzer(Set stopwords, boolean replaceInvalidAcronym) throws IOException{
-    this(Version.LUCENE_24, stopwords);
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-  }
-
-  private final void init(Version matchVersion) {
-    setOverridesTokenStreamMethod(StandardAnalyzer.class);
-    if (matchVersion.onOrAfter(Version.LUCENE_29)) {
-      enableStopPositionIncrements = true;
-    } else {
-      useDefaultStopPositionIncrements = true;
-    }
+    this(matchVersion, WordlistLoader.getWordSet(stopwords));
   }
 
   /** Constructs a {@link StandardTokenizer} filtered by a {@link
   StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. */
   public TokenStream tokenStream(String fieldName, Reader reader) {
-    StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);
+    StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
     tokenStream.setMaxTokenLength(maxTokenLength);
     TokenStream result = new StandardFilter(tokenStream);
     result = new LowerCaseFilter(result);
-    if (useDefaultStopPositionIncrements) {
-      result = new StopFilter(result, stopSet);
-    } else {
-      result = new StopFilter(enableStopPositionIncrements, result, stopSet);
-    }
+    result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
+                            result, stopSet);
     return result;
   }
 
@@ -301,7 +132,6 @@
     return maxTokenLength;
   }
 
-  /** @deprecated Use {@link #tokenStream} instead */
   public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
     if (overridesTokenStreamMethod) {
       // LUCENE-1678: force fallback to tokenStream() if we
@@ -313,14 +143,11 @@
     if (streams == null) {
       streams = new SavedStreams();
       setPreviousTokenStream(streams);
-      streams.tokenStream = new StandardTokenizer(reader);
+      streams.tokenStream = new StandardTokenizer(matchVersion, reader);
       streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
       streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
-      if (useDefaultStopPositionIncrements) {
-        streams.filteredTokenStream = new StopFilter(streams.filteredTokenStream, stopSet);
-      } else {
-        streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, streams.filteredTokenStream, stopSet);
-      }
+      streams.filteredTokenStream = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
+                                                   streams.filteredTokenStream, stopSet);
     } else {
       streams.tokenStream.reset(reader);
     }
@@ -330,26 +157,4 @@
 
     return streams.filteredTokenStream;
   }
-
-  /**
-   *
-   * @return true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   * @deprecated This will be removed (hardwired to true) in 3.0
-   */
-  public boolean isReplaceInvalidAcronym() {
-    return replaceInvalidAcronym;
-  }
-
-  /**
-   *
-   * @param replaceInvalidAcronym Set to true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer
-   *
-   * See https://issues.apache.org/jira/browse/LUCENE-1068
-   * @deprecated This will be removed (hardwired to true) in 3.0
-   */
-  public void setReplaceInvalidAcronym(boolean replaceInvalidAcronym) {
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
-  }
 }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardFilter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardFilter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardFilter.java Sat Oct 24 21:23:15 2009
@@ -17,7 +17,6 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java Sat Oct 24 21:23:15 2009
@@ -27,6 +27,7 @@
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.Version;
 
 /** A grammar-based tokenizer constructed with JFlex
  *
@@ -43,6 +44,14 @@
  * <p>Many applications have specific tokenizer needs.  If this tokenizer does
  * not suit your application, please consider copying this source code
  * directory to your project and maintaining your own grammar-based tokenizer.
+ *
+ * <a name="version"/>
+ * <p>You must specify the required {@link Version}
+ * compatibility when creating StandardTokenizer:
+ * <ul>
+ *   <li> As of 2.4, Tokens incorrectly identified as acronyms
+ *        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)
+ * </ul>
  */
 
 public final class StandardTokenizer extends Tokenizer {
@@ -105,55 +114,50 @@
   }
 
   /**
-   * Creates a new instance of the {@link StandardTokenizer}. Attaches the
-   * <code>input</code> to a newly created JFlex scanner.
-   */
-  public StandardTokenizer(Reader input) {
-    this(input, false);
-  }
-
-  /**
    * Creates a new instance of the {@link org.apache.lucene.analysis.standard.StandardTokenizer}.  Attaches
    * the <code>input</code> to the newly created JFlex scanner.
    *
    * @param input The input reader
-   * @param replaceInvalidAcronym Set to true to replace mischaracterized acronyms with HOST.
    *
    * See http://issues.apache.org/jira/browse/LUCENE-1068
    */
-  public StandardTokenizer(Reader input, boolean replaceInvalidAcronym) {
+  public StandardTokenizer(Version matchVersion, Reader input) {
     super();
     this.scanner = new StandardTokenizerImpl(input);
-    init(input, replaceInvalidAcronym);
+    init(input, matchVersion);
   }
 
   /**
    * Creates a new StandardTokenizer with a given {@link AttributeSource}. 
    */
-  public StandardTokenizer(AttributeSource source, Reader input, boolean replaceInvalidAcronym) {
+  public StandardTokenizer(Version matchVersion, AttributeSource source, Reader input) {
     super(source);
     this.scanner = new StandardTokenizerImpl(input);
-    init(input, replaceInvalidAcronym);
+    init(input, matchVersion);
   }
 
   /**
    * Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory} 
    */
-  public StandardTokenizer(AttributeFactory factory, Reader input, boolean replaceInvalidAcronym) {
+  public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input) {
     super(factory);
     this.scanner = new StandardTokenizerImpl(input);
-    init(input, replaceInvalidAcronym);
+    init(input, matchVersion);
   }
 
-  private void init(Reader input, boolean replaceInvalidAcronym) {
-    this.replaceInvalidAcronym = replaceInvalidAcronym;
+  private void init(Reader input, Version matchVersion) {
+    if (matchVersion.onOrAfter(Version.LUCENE_24)) {
+      replaceInvalidAcronym = true;
+    } else {
+      replaceInvalidAcronym = false;
+    }
     this.input = input;    
     termAtt = addAttribute(TermAttribute.class);
     offsetAtt = addAttribute(OffsetAttribute.class);
     posIncrAtt = addAttribute(PositionIncrementAttribute.class);
     typeAtt = addAttribute(TypeAttribute.class);
   }
-  
+
   // this tokenizer generates three attributes:
   // offset, positionIncrement and type
   private TermAttribute termAtt;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/AbstractField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/AbstractField.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/AbstractField.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/AbstractField.java Sat Oct 24 21:23:15 2009
@@ -56,35 +56,10 @@
       throw new NullPointerException("name cannot be null");
     this.name = StringHelper.intern(name);        // field names are interned
 
-    if (store == Field.Store.YES){
-      this.isStored = true;
-    }
-    else if (store == Field.Store.NO){
-      this.isStored = false;
-    }
-    else
-      throw new IllegalArgumentException("unknown store parameter " + store);
-
-    if (index == Field.Index.NO) {
-      this.isIndexed = false;
-      this.isTokenized = false;
-    } else if (index == Field.Index.ANALYZED) {
-      this.isIndexed = true;
-      this.isTokenized = true;
-    } else if (index == Field.Index.NOT_ANALYZED) {
-      this.isIndexed = true;
-      this.isTokenized = false;
-    } else if (index == Field.Index.NOT_ANALYZED_NO_NORMS) {
-      this.isIndexed = true;
-      this.isTokenized = false;
-      this.omitNorms = true;
-    } else if (index == Field.Index.ANALYZED_NO_NORMS) {
-      this.isIndexed = true;
-      this.isTokenized = true;
-      this.omitNorms = true;
-    } else {
-      throw new IllegalArgumentException("unknown index parameter " + index);
-    }
+    this.isStored = store.isStored();
+    this.isIndexed = index.isIndexed();
+    this.isTokenized = index.isAnalyzed();
+    this.omitNorms = index.omitNorms();
 
     this.isBinary = false;
 
@@ -138,34 +113,9 @@
   public String name()    { return name; }
 
   protected void setStoreTermVector(Field.TermVector termVector) {
-    if (termVector == Field.TermVector.NO) {
-      this.storeTermVector = false;
-      this.storePositionWithTermVector = false;
-      this.storeOffsetWithTermVector = false;
-    }
-    else if (termVector == Field.TermVector.YES) {
-      this.storeTermVector = true;
-      this.storePositionWithTermVector = false;
-      this.storeOffsetWithTermVector = false;
-    }
-    else if (termVector == Field.TermVector.WITH_POSITIONS) {
-      this.storeTermVector = true;
-      this.storePositionWithTermVector = true;
-      this.storeOffsetWithTermVector = false;
-    }
-    else if (termVector == Field.TermVector.WITH_OFFSETS) {
-      this.storeTermVector = true;
-      this.storePositionWithTermVector = false;
-      this.storeOffsetWithTermVector = true;
-    }
-    else if (termVector == Field.TermVector.WITH_POSITIONS_OFFSETS) {
-      this.storeTermVector = true;
-      this.storePositionWithTermVector = true;
-      this.storeOffsetWithTermVector = true;
-    }
-    else {
-      throw new IllegalArgumentException("unknown termVector parameter " + termVector);
-    }
+    this.storeTermVector = termVector.isStored();
+    this.storePositionWithTermVector = termVector.withPositions();
+    this.storeOffsetWithTermVector = termVector.withOffsets();
   }
 
   /** True iff the value of the field is to be stored in the index for return

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Document.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Document.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Document.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Document.java Sat Oct 24 21:23:15 2009
@@ -102,7 +102,7 @@
   public final void removeField(String name) {
     Iterator<Fieldable> it = fields.iterator();
     while (it.hasNext()) {
-      Fieldable field = (Fieldable)it.next();
+      Fieldable field = it.next();
       if (field.name().equals(name)) {
         it.remove();
         return;
@@ -122,7 +122,7 @@
   public final void removeFields(String name) {
     Iterator<Fieldable> it = fields.iterator();
     while (it.hasNext()) {
-      Fieldable field = (Fieldable)it.next();
+      Fieldable field = it.next();
       if (field.name().equals(name)) {
         it.remove();
       }
@@ -196,7 +196,7 @@
      if (result.size() == 0)
        return NO_FIELDS;
 
-     return (Field[])result.toArray(new Field[result.size()]);
+     return result.toArray(new Field[result.size()]);
    }
 
 
@@ -221,7 +221,7 @@
      if (result.size() == 0)
        return NO_FIELDABLES;
 
-     return (Fieldable[])result.toArray(new Fieldable[result.size()]);
+     return result.toArray(new Fieldable[result.size()]);
    }
 
 
@@ -244,7 +244,7 @@
     if (result.size() == 0)
       return NO_STRINGS;
     
-    return (String[])result.toArray(new String[result.size()]);
+    return result.toArray(new String[result.size()]);
   }
 
   private final static byte[][] NO_BYTES = new byte[0][];
@@ -268,7 +268,7 @@
     if (result.size() == 0)
       return NO_BYTES;
   
-    return (byte[][])result.toArray(new byte[result.size()][]);
+    return result.toArray(new byte[result.size()][]);
   }
   
   /**

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Field.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Field.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Field.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/Field.java Sat Oct 24 21:23:15 2009
@@ -19,7 +19,6 @@
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.index.IndexWriter;   // for javadoc
-import org.apache.lucene.util.Parameter;
 import org.apache.lucene.util.StringHelper;
 
 import java.io.Reader;
@@ -36,45 +35,56 @@
 public final class Field extends AbstractField implements Fieldable, Serializable {
   
   /** Specifies whether and how a field should be stored. */
-  public static final class Store extends Parameter implements Serializable {
-
-    private Store(String name) {
-      super(name);
-    }
+  public static enum Store {
 
     /** Store the original field value in the index. This is useful for short texts
      * like a document's title which should be displayed with the results. The
      * value is stored in its original form, i.e. no analyzer is used before it is
      * stored.
      */
-    public static final Store YES = new Store("YES");
+    YES {
+      public boolean isStored() { return true; }
+    },
 
     /** Do not store the field value in the index. */
-    public static final Store NO = new Store("NO");
+    NO
+    {
+      public boolean isStored() { return false; }
+    };
+
+    public abstract boolean isStored();
   }
 
   /** Specifies whether and how a field should be indexed. */
-  public static final class Index extends Parameter implements Serializable {
-
-    private Index(String name) {
-      super(name);
-    }
+  public static enum Index {
 
     /** Do not index the field value. This field can thus not be searched,
      * but one can still access its contents provided it is
      * {@link Field.Store stored}. */
-    public static final Index NO = new Index("NO");
+    NO {
+      public boolean isIndexed()  { return false; }
+      public boolean isAnalyzed() { return false; }
+      public boolean omitNorms()  { return true;  }   
+    },
 
     /** Index the tokens produced by running the field's
      * value through an Analyzer.  This is useful for
      * common text. */
-    public static final Index ANALYZED = new Index("ANALYZED");
+    ANALYZED {
+      public boolean isIndexed()  { return true;  }
+      public boolean isAnalyzed() { return true;  }
+      public boolean omitNorms()  { return false; }   	
+    },
 
     /** Index the field's value without using an Analyzer, so it can be searched.
      * As no analyzer is used the value will be stored as a single term. This is
      * useful for unique Ids like product numbers.
      */
-    public static final Index NOT_ANALYZED = new Index("NOT_ANALYZED");
+    NOT_ANALYZED {
+      public boolean isIndexed()  { return true;  }
+      public boolean isAnalyzed() { return false; }
+      public boolean omitNorms()  { return false; }   	
+    },
 
     /** Expert: Index the field's value without an Analyzer,
      * and also disable the storing of norms.  Note that you
@@ -90,44 +100,96 @@
      * above described effect on a field, all instances of
      * that field must be indexed with NOT_ANALYZED_NO_NORMS
      * from the beginning. */
-    public static final Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
+    NOT_ANALYZED_NO_NORMS {
+      public boolean isIndexed()  { return true;  }
+      public boolean isAnalyzed() { return false; }
+      public boolean omitNorms()  { return true;  }   	
+    },
 
     /** Expert: Index the tokens produced by running the
      *  field's value through an Analyzer, and also
      *  separately disable the storing of norms.  See
      *  {@link #NOT_ANALYZED_NO_NORMS} for what norms are
      *  and why you may want to disable them. */
-    public static final Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
+    ANALYZED_NO_NORMS {
+      public boolean isIndexed()  { return true;  }
+      public boolean isAnalyzed() { return true;  }
+      public boolean omitNorms()  { return true;  }   	
+    };
+
+    /** Get the best representation of the index given the flags. */
+    public static Index toIndex(boolean indexed, boolean analyzed) {
+      return toIndex(indexed, analyzed, false);
+    }
+
+    /** Expert: Get the best representation of the index given the flags. */
+    public static Index toIndex(boolean indexed, boolean analyzed, boolean omitNorms) {
+
+      // If it is not indexed nothing else matters
+      if (!indexed) {
+        return Index.NO;
+      }
+
+      // typical, non-expert
+      if (!omitNorms) {
+        if (analyzed) {
+          return Index.ANALYZED;
+        }
+        return Index.NOT_ANALYZED;
+      }
+
+      // Expert: Norms omitted
+      if (analyzed) {
+        return Index.ANALYZED_NO_NORMS;
+      }
+      return Index.NOT_ANALYZED_NO_NORMS;
+    }
+
+    public abstract boolean isIndexed();
+    public abstract boolean isAnalyzed();
+    public abstract boolean omitNorms();  	
   }
 
   /** Specifies whether and how a field should have term vectors. */
-  public static final class TermVector  extends Parameter implements Serializable {
-    
-    private TermVector(String name) {
-      super(name);
-    }
+  public static enum TermVector {
     
     /** Do not store term vectors. 
      */
-    public static final TermVector NO = new TermVector("NO");
+    NO {
+    	public boolean isStored()      { return false; }
+    	public boolean withPositions() { return false; }
+    	public boolean withOffsets()   { return false; }
+    },
     
     /** Store the term vectors of each document. A term vector is a list
      * of the document's terms and their number of occurrences in that document. */
-    public static final TermVector YES = new TermVector("YES");
+    YES {
+    	public boolean isStored()      { return true;  }
+    	public boolean withPositions() { return false; }
+    	public boolean withOffsets()   { return false; }
+    },
     
     /**
      * Store the term vector + token position information
      * 
      * @see #YES
      */ 
-    public static final TermVector WITH_POSITIONS = new TermVector("WITH_POSITIONS");
+    WITH_POSITIONS {
+    	public boolean isStored()      { return true;  }
+    	public boolean withPositions() { return true;  }
+    	public boolean withOffsets()   { return false; }
+    },
     
     /**
      * Store the term vector + Token offset information
      * 
      * @see #YES
      */ 
-    public static final TermVector WITH_OFFSETS = new TermVector("WITH_OFFSETS");
+    WITH_OFFSETS {
+    	public boolean isStored()      { return true;  }
+    	public boolean withPositions() { return false; }
+    	public boolean withOffsets()   { return true;  }
+    },
     
     /**
      * Store the term vector + Token position and offset information
@@ -136,7 +198,36 @@
      * @see #WITH_POSITIONS
      * @see #WITH_OFFSETS
      */ 
-    public static final TermVector WITH_POSITIONS_OFFSETS = new TermVector("WITH_POSITIONS_OFFSETS");
+    WITH_POSITIONS_OFFSETS {
+    	public boolean isStored()      { return true;  }
+    	public boolean withPositions() { return true;  }
+    	public boolean withOffsets()   { return true;  }
+    };
+
+    /** Get the best representation of a TermVector given the flags. */
+    public static TermVector toTermVector(boolean stored, boolean withOffsets, boolean withPositions) {
+
+      // If it is not stored, nothing else matters.
+      if (!stored) {
+    	return TermVector.NO;
+      }
+
+      if (withOffsets) {
+        if (withPositions) {
+          return Field.TermVector.WITH_POSITIONS_OFFSETS;
+        }
+        return Field.TermVector.WITH_OFFSETS;
+      }
+
+      if (withPositions) {
+        return Field.TermVector.WITH_POSITIONS;
+      }
+      return Field.TermVector.YES;
+    }
+
+    public abstract boolean isStored();
+    public abstract boolean withPositions();
+    public abstract boolean withOffsets();
   }
   
   
@@ -288,38 +379,15 @@
     
     this.fieldsData = value;
 
-    if (store == Store.YES){
-      this.isStored = true;
-    }
-    else if (store == Store.NO){
-      this.isStored = false;
-    }
-    else
-      throw new IllegalArgumentException("unknown store parameter " + store);
+    this.isStored = store.isStored();
    
+    this.isIndexed = index.isIndexed();
+    this.isTokenized = index.isAnalyzed();
+    this.omitNorms = index.omitNorms();
     if (index == Index.NO) {
-      this.isIndexed = false;
-      this.isTokenized = false;
       this.omitTermFreqAndPositions = false;
-      this.omitNorms = true;
-    } else if (index == Index.ANALYZED) {
-      this.isIndexed = true;
-      this.isTokenized = true;
-    } else if (index == Index.NOT_ANALYZED) {
-      this.isIndexed = true;
-      this.isTokenized = false;
-    } else if (index == Index.NOT_ANALYZED_NO_NORMS) {
-      this.isIndexed = true;
-      this.isTokenized = false;
-      this.omitNorms = true;
-    } else if (index == Index.ANALYZED_NO_NORMS) {
-      this.isIndexed = true;
-      this.isTokenized = true;
-      this.omitNorms = true;
-    } else {
-      throw new IllegalArgumentException("unknown index parameter " + index);
-    }
-    
+    }    
+
     this.isBinary = false;
 
     setStoreTermVector(termVector);
@@ -449,14 +517,10 @@
     this.name = StringHelper.intern(name);        // field names are interned
     fieldsData = value;
     
-    if (store == Store.YES) {
-      isStored = true;
-    }
-    else if (store == Store.NO)
+    if (store == Store.NO)
       throw new IllegalArgumentException("binary values can't be unstored");
-    else
-      throw new IllegalArgumentException("unknown store parameter " + store);
     
+    isStored = store.isStored();
     isIndexed   = false;
     isTokenized = false;
     omitTermFreqAndPositions = false;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/FieldSelectorResult.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/FieldSelectorResult.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/FieldSelectorResult.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/document/FieldSelectorResult.java Sat Oct 24 21:23:15 2009
@@ -1,6 +1,5 @@
 package org.apache.lucene.document;
 
-import java.io.Serializable;
 /**
  * Copyright 2004 The Apache Software Foundation
  *
@@ -21,16 +20,16 @@
  *  Provides information about what should be done with this Field 
  *
  **/
-//Replace with an enumerated type in 1.5
-public final class FieldSelectorResult implements Serializable {
+public enum FieldSelectorResult {
 
     /**
      * Load this {@link Field} every time the {@link Document} is loaded, reading in the data as it is encountered.
      *  {@link Document#getField(String)} and {@link Document#getFieldable(String)} should not return null.
      *<p/>
      * {@link Document#add(Fieldable)} should be called by the Reader.
-      */
-  public transient static final FieldSelectorResult LOAD = new FieldSelectorResult(0);
+     */
+  LOAD,
+
     /**
      * Lazily load this {@link Field}.  This means the {@link Field} is valid, but it may not actually contain its data until
      * invoked.  {@link Document#getField(String)} SHOULD NOT BE USED.  {@link Document#getFieldable(String)} is safe to use and should
@@ -38,14 +37,16 @@
      *<p/>
      * {@link Document#add(Fieldable)} should be called by the Reader.
      */
-  public transient static final FieldSelectorResult LAZY_LOAD = new FieldSelectorResult(1);
+  LAZY_LOAD,
+
     /**
      * Do not load the {@link Field}.  {@link Document#getField(String)} and {@link Document#getFieldable(String)} should return null.
      * {@link Document#add(Fieldable)} is not called.
      * <p/>
      * {@link Document#add(Fieldable)} should not be called by the Reader.
      */
-  public transient static final FieldSelectorResult NO_LOAD = new FieldSelectorResult(2);
+  NO_LOAD,
+
     /**
      * Load this field as in the {@link #LOAD} case, but immediately return from {@link Field} loading for the {@link Document}.  Thus, the
      * Document may not have its complete set of Fields.  {@link Document#getField(String)} and {@link Document#getFieldable(String)} should
@@ -53,37 +54,14 @@
      * <p/>
      * {@link Document#add(Fieldable)} should be called by the Reader.
      */
-  public transient static final FieldSelectorResult LOAD_AND_BREAK = new FieldSelectorResult(3);
-
-     /** Expert:  Load the size of this {@link Field} rather than its value.
-       * Size is measured as number of bytes required to store the field == bytes for a binary or any compressed value, and 2*chars for a String value.
-      * The size is stored as a binary value, represented as an int in a byte[], with the higher order byte first in [0]
-      */
-  public transient static final FieldSelectorResult SIZE = new FieldSelectorResult(5);
-
-  /** Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded */         
-  public transient static final FieldSelectorResult SIZE_AND_BREAK = new FieldSelectorResult(6);
-
+  LOAD_AND_BREAK,
 
+    /** Expert:  Load the size of this {@link Field} rather than its value.
+     * Size is measured as number of bytes required to store the field == bytes for a binary or any compressed value, and 2*chars for a String value.
+     * The size is stored as a binary value, represented as an int in a byte[], with the higher order byte first in [0]
+     */
+  SIZE,
 
-  private int id;
-
-  private FieldSelectorResult(int id) {
-    this.id = id;
-  }
-
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    final FieldSelectorResult that = (FieldSelectorResult) o;
-
-    if (id != that.id) return false;
-
-    return true;
-  }
-
-  public int hashCode() {
-    return id;
-  }
+    /** Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded */         
+  SIZE_AND_BREAK
 }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/BufferedDeletes.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/BufferedDeletes.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/BufferedDeletes.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/BufferedDeletes.java Sat Oct 24 21:23:15 2009
@@ -136,7 +136,7 @@
     if (queries.size() > 0) {
       newDeleteQueries = new HashMap<Query, Integer>(queries.size());
       for(Entry<Query,Integer> entry: queries.entrySet()) {
-        Integer num = (Integer) entry.getValue();
+        Integer num = entry.getValue();
         newDeleteQueries.put(entry.getKey(),
                              Integer.valueOf(mapper.remap(num.intValue())));
       }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/CompoundFileReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/CompoundFileReader.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/CompoundFileReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/CompoundFileReader.java Sat Oct 24 21:23:15 2009
@@ -129,7 +129,7 @@
         if (stream == null)
             throw new IOException("Stream closed");
 
-        FileEntry entry = (FileEntry) entries.get(id);
+        FileEntry entry = entries.get(id);
         if (entry == null)
             throw new IOException("No sub-file with id " + id + " found");
 
@@ -139,7 +139,7 @@
     /** Returns an array of strings, one for each file in the directory. */
     public String[] listAll() {
         String res[] = new String[entries.size()];
-        return (String[]) entries.keySet().toArray(res);
+        return entries.keySet().toArray(res);
     }
 
     /** Returns true iff a file with the given name exists. */
@@ -176,7 +176,7 @@
     public long fileLength(String name)
     throws IOException
     {
-        FileEntry e = (FileEntry) entries.get(name);
+        FileEntry e = entries.get(name);
         if (e == null)
             throw new IOException("File " + name + " does not exist");
         return e.length;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java Sat Oct 24 21:23:15 2009
@@ -88,7 +88,7 @@
 
     final int numThreads = mergeThreadCount();
     for(int i=0;i<numThreads;i++) {
-      MergeThread merge = (MergeThread) mergeThreads.get(i);
+      MergeThread merge = mergeThreads.get(i);
       merge.setThreadPriority(pri);
     }
   }
@@ -123,7 +123,7 @@
       final int count = mergeThreads.size();
       if (verbose()) {
         for(int i=0;i<count;i++)
-          message("    " + i + ": " + ((MergeThread) mergeThreads.get(i)));
+          message("    " + i + ": " + mergeThreads.get(i));
       }
       
       try {
@@ -141,7 +141,7 @@
     int count = 0;
     final int numThreads = mergeThreads.size();
     for(int i=0;i<numThreads;i++)
-      if (((MergeThread) mergeThreads.get(i)).isAlive())
+      if (mergeThreads.get(i).isAlive())
         count++;
     return count;
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DirectoryReader.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DirectoryReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DirectoryReader.java Sat Oct 24 21:23:15 2009
@@ -248,7 +248,7 @@
     
     for (int i = infos.size() - 1; i>=0; i--) {
       // find SegmentReader for this segment
-      Integer oldReaderIndex = (Integer) segmentReaders.get(infos.info(i).name);
+      Integer oldReaderIndex = segmentReaders.get(infos.info(i).name);
       if (oldReaderIndex == null) {
         // this is a new segment, no old SegmentReader can be reused
         newReaders[i] = null;
@@ -314,12 +314,12 @@
           continue;
         }
 
-        byte[] oldBytes = (byte[]) entry.getValue();
+        byte[] oldBytes = entry.getValue();
 
         byte[] bytes = new byte[maxDoc()];
 
         for (int i = 0; i < subReaders.length; i++) {
-          Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
+          Integer oldReaderIndex = segmentReaders.get(subReaders[i].getSegmentName());
 
           // this SegmentReader was not re-opened, we can copy all of its norms 
           if (oldReaderIndex != null &&
@@ -496,14 +496,14 @@
         assert isCurrent();
 
         if (openReadOnly) {
-          return (IndexReader) clone(openReadOnly);
+          return clone(openReadOnly);
         } else {
           return this;
         }
       } else if (isCurrent()) {
         if (openReadOnly != readOnly) {
           // Just fallback to clone
-          return (IndexReader) clone(openReadOnly);
+          return clone(openReadOnly);
         } else {
           return this;
         }
@@ -514,7 +514,7 @@
       if (segmentInfos != null && commit.getSegmentsFileName().equals(segmentInfos.getCurrentSegmentFileName())) {
         if (readOnly != openReadOnly) {
           // Just fallback to clone
-          return (IndexReader) clone(openReadOnly);
+          return clone(openReadOnly);
         } else {
           return this;
         }
@@ -665,7 +665,7 @@
 
   public synchronized byte[] norms(String field) throws IOException {
     ensureOpen();
-    byte[] bytes = (byte[])normsCache.get(field);
+    byte[] bytes = normsCache.get(field);
     if (bytes != null)
       return bytes;          // cache hit
     if (!hasNorms(field))
@@ -681,7 +681,7 @@
   public synchronized void norms(String field, byte[] result, int offset)
     throws IOException {
     ensureOpen();
-    byte[] bytes = (byte[])normsCache.get(field);
+    byte[] bytes = normsCache.get(field);
     if (bytes==null && !hasNorms(field)) {
       Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
     } else if (bytes != null) {                           // cache hit
@@ -1578,7 +1578,7 @@
       int numMatchingSegments = 0;
       matchingSegments[0] = null;
 
-      LegacySegmentMergeInfo top = (LegacySegmentMergeInfo)queue.top();
+      LegacySegmentMergeInfo top = queue.top();
 
       if (top == null) {
         term = null;
@@ -1592,7 +1592,7 @@
         matchingSegments[numMatchingSegments++] = top;
         queue.pop();
         docFreq += top.termEnum.docFreq();    // increment freq
-        top = (LegacySegmentMergeInfo)queue.top();
+        top = queue.top();
       }
 
       matchingSegments[numMatchingSegments] = null;
@@ -1771,7 +1771,7 @@
     }
   
     protected TermDocs termDocs(IndexReader reader) throws IOException {
-      return (TermDocs)reader.termPositions();
+      return reader.termPositions();
     }
   
     public int nextPosition() throws IOException {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldConsumers.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldConsumers.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldConsumers.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldConsumers.java Sat Oct 24 21:23:15 2009
@@ -29,6 +29,8 @@
 /** This is just a "splitter" class: it lets you wrap two
  *  DocFieldConsumer instances as a single consumer. */
 
+// TODO: Fix the unchecked collections, I do not understand the whole code here -- Uwe
+@SuppressWarnings("unchecked")
 final class DocFieldConsumers extends DocFieldConsumer {
   final DocFieldConsumer one;
   final DocFieldConsumer two;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java Sat Oct 24 21:23:15 2009
@@ -128,7 +128,7 @@
   }
 
   private void rehash() {
-    final int newHashSize = (int) (fieldHash.length*2);
+    final int newHashSize = (fieldHash.length*2);
     assert newHashSize > fieldHash.length;
 
     final DocFieldProcessorPerField newHashArray[] = new DocFieldProcessorPerField[newHashSize];
@@ -172,7 +172,7 @@
     // vectors, etc.):
 
     for(int i=0;i<numDocFields;i++) {
-      Fieldable field = (Fieldable) docFields.get(i);
+      Fieldable field = docFields.get(i);
       final String fieldName = field.name();
 
       // Make sure we have a PerField allocated

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/DocumentsWriter.java Sat Oct 24 21:23:15 2009
@@ -402,10 +402,12 @@
 
   /* Returns Collection of files in use by this instance,
    * including any flushed segments. */
+  @SuppressWarnings("unchecked")
   synchronized List<String> openFiles() {
-    return ( List<String>) ((ArrayList<String>) openFiles).clone();
+    return (List<String>) ((ArrayList<String>) openFiles).clone();
   }
 
+  @SuppressWarnings("unchecked")
   synchronized List<String> closedFiles() {
     return (List<String>) ((ArrayList<String>) closedFiles).clone();
   }
@@ -670,7 +672,7 @@
     // First, find a thread state.  If this thread already
     // has affinity to a specific ThreadState, use that one
     // again.
-    DocumentsWriterThreadState state = (DocumentsWriterThreadState) threadBindings.get(Thread.currentThread());
+    DocumentsWriterThreadState state = threadBindings.get(Thread.currentThread());
     if (state == null) {
 
       // First time this thread has called us since last

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldInfos.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldInfos.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldInfos.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldInfos.java Sat Oct 24 21:23:15 2009
@@ -103,7 +103,7 @@
     FieldInfos fis = new FieldInfos();
     final int numField = byNumber.size();
     for(int i=0;i<numField;i++) {
-      FieldInfo fi = (FieldInfo) ((FieldInfo) byNumber.get(i)).clone();
+      FieldInfo fi = (FieldInfo) ( byNumber.get(i)).clone();
       fis.byNumber.add(fi);
       fis.byName.put(fi.name, fi);
     }
@@ -265,7 +265,7 @@
   }
 
   public FieldInfo fieldInfo(String fieldName) {
-    return (FieldInfo) byName.get(fieldName);
+    return  byName.get(fieldName);
   }
 
   /**

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldsReader.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldsReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FieldsReader.java Sat Oct 24 21:23:15 2009
@@ -18,7 +18,12 @@
  */
 
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.*;
+import org.apache.lucene.document.AbstractField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -298,8 +303,8 @@
       fieldsStream.seek(pointer + toRead);
     } else {
       Field.Store store = Field.Store.YES;
-      Field.Index index = getIndexType(fi, tokenize);
-      Field.TermVector termVector = getTermVectorType(fi);
+      Field.Index index = Field.Index.toIndex(fi.isIndexed, tokenize);
+      Field.TermVector termVector = Field.TermVector.toTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
 
       AbstractField f;
       int length = fieldsStream.readVInt();
@@ -327,8 +332,8 @@
       doc.add(new Field(fi.name, b, Field.Store.YES));
     } else {
       Field.Store store = Field.Store.YES;
-      Field.Index index = getIndexType(fi, tokenize);
-      Field.TermVector termVector = getTermVectorType(fi);
+      Field.Index index = Field.Index.toIndex(fi.isIndexed, tokenize);
+      Field.TermVector termVector = Field.TermVector.toTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
 
       AbstractField f;
       f = new Field(fi.name,     // name
@@ -357,37 +362,6 @@
     return size;
   }
 
-  private Field.TermVector getTermVectorType(FieldInfo fi) {
-    Field.TermVector termVector = null;
-    if (fi.storeTermVector) {
-      if (fi.storeOffsetWithTermVector) {
-        if (fi.storePositionWithTermVector) {
-          termVector = Field.TermVector.WITH_POSITIONS_OFFSETS;
-        } else {
-          termVector = Field.TermVector.WITH_OFFSETS;
-        }
-      } else if (fi.storePositionWithTermVector) {
-        termVector = Field.TermVector.WITH_POSITIONS;
-      } else {
-        termVector = Field.TermVector.YES;
-      }
-    } else {
-      termVector = Field.TermVector.NO;
-    }
-    return termVector;
-  }
-
-  private Field.Index getIndexType(FieldInfo fi, boolean tokenize) {
-    Field.Index index;
-    if (fi.isIndexed && tokenize)
-      index = Field.Index.ANALYZED;
-    else if (fi.isIndexed && !tokenize)
-      index = Field.Index.NOT_ANALYZED;
-    else
-      index = Field.Index.NO;
-    return index;
-  }
-
   /**
    * A Lazy implementation of Fieldable that differs loading of fields until asked for, instead of when the Document is
    * loaded.

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FreqProxTermsWriter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FreqProxTermsWriter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/FreqProxTermsWriter.java Sat Oct 24 21:23:15 2009
@@ -31,6 +31,8 @@
 import org.apache.lucene.index.codecs.TermsConsumer;
 import org.apache.lucene.util.UnicodeUtil;
 
+// TODO: Fix the unchecked collections, I do not understand the whole code here -- Uwe
+@SuppressWarnings("unchecked")
 final class FreqProxTermsWriter extends TermsHashConsumer {
 
   public TermsHashConsumerPerThread addThread(TermsHashPerThread perThread) {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexFileDeleter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexFileDeleter.java Sat Oct 24 21:23:15 2009
@@ -280,7 +280,7 @@
       // First decref all files that had been referred to by
       // the now-deleted commits:
       for(int i=0;i<size;i++) {
-        CommitPoint commit = (CommitPoint) commitsToDelete.get(i);
+        CommitPoint commit = commitsToDelete.get(i);
         if (infoStream != null) {
           message("deleteCommits: now decRef commit \"" + commit.getSegmentsFileName() + "\"");
         }
@@ -295,7 +295,7 @@
       int readFrom = 0;
       int writeTo = 0;
       while(readFrom < size) {
-        CommitPoint commit = (CommitPoint) commits.get(readFrom);
+        CommitPoint commit = commits.get(readFrom);
         if (!commit.deleted) {
           if (writeTo != readFrom) {
             commits.set(writeTo, commits.get(readFrom));

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexWriter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/IndexWriter.java Sat Oct 24 21:23:15 2009
@@ -591,7 +591,7 @@
         readBufferSize = BufferedIndexInput.BUFFER_SIZE;
       }
 
-      SegmentReader sr = (SegmentReader) readerMap.get(info);
+      SegmentReader sr = readerMap.get(info);
       if (sr == null) {
         // TODO: we may want to avoid doing this while
         // synchronized
@@ -621,7 +621,7 @@
 
     // Returns a ref
     public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
-      SegmentReader sr = (SegmentReader) readerMap.get(info);
+      SegmentReader sr = readerMap.get(info);
       if (sr != null) {
         sr.incRef();
       }
@@ -2375,7 +2375,7 @@
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++)
-          registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
+          registerMerge(spec.merges.get(i));
       }
     }
 
@@ -2396,7 +2396,7 @@
           // if any of them have hit an exception.
           running = false;
           for(int i=0;i<numMerges;i++) {
-            final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges.get(i);
+            final MergePolicy.OneMerge merge = spec.merges.get(i);
             if (pendingMerges.contains(merge) || runningMerges.contains(merge))
               running = true;
             Throwable t = merge.getException();
@@ -2491,7 +2491,7 @@
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++) {
-          final MergePolicy.OneMerge merge = ((MergePolicy.OneMerge) spec.merges.get(i));
+          final MergePolicy.OneMerge merge = ( spec.merges.get(i));
           merge.optimize = true;
           merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
         }
@@ -2503,7 +2503,7 @@
     if (spec != null) {
       final int numMerges = spec.merges.size();
       for(int i=0;i<numMerges;i++)
-        registerMerge((MergePolicy.OneMerge) spec.merges.get(i));
+        registerMerge(spec.merges.get(i));
     }
   }
 
@@ -2515,7 +2515,7 @@
       return null;
     else {
       // Advance the merge from pending to running
-      MergePolicy.OneMerge merge = (MergePolicy.OneMerge) pendingMerges.removeFirst();
+      MergePolicy.OneMerge merge = pendingMerges.removeFirst();
       runningMerges.add(merge);
       return merge;
     }
@@ -2529,7 +2529,7 @@
     else {
       Iterator<MergePolicy.OneMerge> it = pendingMerges.iterator();
       while(it.hasNext()) {
-        MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next();
+        MergePolicy.OneMerge merge = it.next();
         if (merge.isExternal) {
           // Advance the merge from pending to running
           it.remove();

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MergePolicy.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MergePolicy.java Sat Oct 24 21:23:15 2009
@@ -163,7 +163,7 @@
       b.append("MergeSpec:\n");
       final int count = merges.size();
       for(int i=0;i<count;i++)
-        b.append("  ").append(1 + i).append(": ").append(((OneMerge) merges.get(i)).segString(dir));
+        b.append("  ").append(1 + i).append(": ").append(merges.get(i).segString(dir));
       return b.toString();
     }
   }

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultiReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultiReader.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultiReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultiReader.java Sat Oct 24 21:23:15 2009
@@ -73,7 +73,7 @@
   }
   
   private void initialize(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
-    this.subReaders = (IndexReader[]) subReaders.clone();
+    this.subReaders =  subReaders.clone();
     starts = new int[subReaders.length + 1];    // build starts array
     decrefOnClose = new boolean[subReaders.length];
     Bits[] subs = new Bits[subReaders.length];

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultipleTermPositions.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultipleTermPositions.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultipleTermPositions.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/MultipleTermPositions.java Sat Oct 24 21:23:15 2009
@@ -144,7 +144,7 @@
 
   public final boolean skipTo(int target) throws IOException {
     while (_termPositionsQueue.peek() != null && target > _termPositionsQueue.peek().doc()) {
-      TermPositions tp = (TermPositions) _termPositionsQueue.pop();
+      TermPositions tp =  _termPositionsQueue.pop();
       if (tp.skipTo(target))
         _termPositionsQueue.add(tp);
       else
@@ -163,7 +163,7 @@
 
   public final void close() throws IOException {
     while (_termPositionsQueue.size() > 0)
-      ((TermPositions) _termPositionsQueue.pop()).close();
+      _termPositionsQueue.pop().close();
   }
 
   /**

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/NormsWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/NormsWriter.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/NormsWriter.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/NormsWriter.java Sat Oct 24 21:23:15 2009
@@ -35,6 +35,8 @@
  *  merges all of these together into a single _X.nrm file.
  */
 
+// TODO: Fix the unchecked collections, I do not understand the whole code here -- Uwe
+@SuppressWarnings("unchecked")
 final class NormsWriter extends InvertedDocEndConsumer {
 
   private static final byte defaultNorm = Similarity.encodeNorm(1.0f);

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ParallelReader.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ParallelReader.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ParallelReader.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/ParallelReader.java Sat Oct 24 21:23:15 2009
@@ -251,8 +251,8 @@
       List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
       ParallelReader pr = new ParallelReader();
       for (int i = 0; i < readers.size(); i++) {
-        IndexReader oldReader = (IndexReader) readers.get(i);
-        IndexReader newReader = (IndexReader) newReaders.get(i);
+        IndexReader oldReader = readers.get(i);
+        IndexReader newReader = newReaders.get(i);
         if (newReader == oldReader) {
           newDecrefOnClose.add(Boolean.TRUE);
           newReader.incRef();
@@ -349,8 +349,7 @@
       if (vector != null)
         results.add(vector);
     }
-    return (TermFreqVector[])
-      results.toArray(new TermFreqVector[results.size()]);
+    return results.toArray(new TermFreqVector[results.size()]);
   }
 
   public TermFreqVector getTermFreqVector(int n, String field)

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java Sat Oct 24 21:23:15 2009
@@ -70,7 +70,7 @@
   public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
     for (int i = 0; i < positions.length; i++) {
       Integer posVal = Integer.valueOf(positions[i]);
-      TVPositionInfo pos = (TVPositionInfo) currentPositions.get(posVal);
+      TVPositionInfo pos = currentPositions.get(posVal);
       if (pos == null) {
         pos = new TVPositionInfo(positions[i], storeOffsets);
         currentPositions.put(posVal, pos);

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SegmentMerger.java Sat Oct 24 21:23:15 2009
@@ -127,7 +127,7 @@
    * @return The ith reader to be merged
    */
   final IndexReader segmentReader(int i) {
-    return (IndexReader) readers.get(i);
+    return readers.get(i);
   }
 
   /**
@@ -267,7 +267,7 @@
     // FieldInfos, then we can do a bulk copy of the
     // stored fields:
     for (int i = 0; i < numReaders; i++) {
-      IndexReader reader = (IndexReader) readers.get(i);
+      IndexReader reader = readers.get(i);
       if (reader instanceof SegmentReader) {
         SegmentReader segmentReader = (SegmentReader) reader;
         boolean same = true;
@@ -606,7 +606,7 @@
     int base = 0;
     final int readerCount = readers.size();
     for (int i = 0; i < readerCount; i++) {
-      IndexReader reader = (IndexReader) readers.get(i);
+      IndexReader reader = readers.get(i);
       SegmentMergeInfo smi = new SegmentMergeInfo(base, reader);
       int[] docMap  = smi.getDocMap();
       if (docMap != null) {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/SortedTermVectorMapper.java Sat Oct 24 21:23:15 2009
@@ -61,7 +61,7 @@
    */
   //We need to combine any previous mentions of the term
   public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
-    TermVectorEntry entry = (TermVectorEntry) termToTVE.get(term);
+    TermVectorEntry entry = termToTVE.get(term);
     if (entry == null) {
       entry = new TermVectorEntry(ALL, term, frequency, 
               storeOffsets == true ? offsets : null,

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/index/TermsHash.java Sat Oct 24 21:23:15 2009
@@ -36,6 +36,8 @@
  *  under each term.
  */
 
+// TODO: Fix the unchecked collections, I do not understand the whole code here -- Uwe
+@SuppressWarnings("unchecked")
 final class TermsHash extends InvertedDocConsumer {
 
   final TermsHashConsumer consumer;

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java Sat Oct 24 21:23:15 2009
@@ -27,6 +27,7 @@
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
 
 /**
  * A QueryParser which constructs queries to search multiple fields.
@@ -36,7 +37,7 @@
 public class MultiFieldQueryParser extends QueryParser
 {
   protected String[] fields;
-  protected Map      boosts;
+  protected Map<String,Float> boosts;
 
   /**
    * Creates a MultiFieldQueryParser. 
@@ -65,8 +66,8 @@
    * <p>In other words, all the query's terms must appear, but it doesn't matter in
    * what fields they appear.</p>
    */
-  public MultiFieldQueryParser(String[] fields, Analyzer analyzer, Map boosts) {
-    this(fields,analyzer);
+  public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer, Map<String,Float> boosts) {
+    this(matchVersion, fields, analyzer);
     this.boosts = boosts;
   }
   
@@ -90,21 +91,21 @@
    * <p>In other words, all the query's terms must appear, but it doesn't matter in
    * what fields they appear.</p>
    */
-  public MultiFieldQueryParser(String[] fields, Analyzer analyzer) {
-    super(null, analyzer);
+  public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer) {
+    super(matchVersion, null, analyzer);
     this.fields = fields;
   }
   
   protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
     if (field == null) {
-      List clauses = new ArrayList();
+      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
       for (int i = 0; i < fields.length; i++) {
         Query q = super.getFieldQuery(fields[i], queryText);
         if (q != null) {
           //If the user passes a map of boosts
           if (boosts != null) {
             //Get the boost from the map and apply them
-            Float boost = (Float)boosts.get(fields[i]);
+            Float boost = boosts.get(fields[i]);
             if (boost != null) {
               q.setBoost(boost.floatValue());
             }
@@ -139,7 +140,7 @@
   protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
   {
     if (field == null) {
-      List clauses = new ArrayList();
+      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity),
             BooleanClause.Occur.SHOULD));
@@ -152,7 +153,7 @@
   protected Query getPrefixQuery(String field, String termStr) throws ParseException
   {
     if (field == null) {
-      List clauses = new ArrayList();
+      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr),
             BooleanClause.Occur.SHOULD));
@@ -164,7 +165,7 @@
 
   protected Query getWildcardQuery(String field, String termStr) throws ParseException {
     if (field == null) {
-      List clauses = new ArrayList();
+      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr),
             BooleanClause.Occur.SHOULD));
@@ -177,7 +178,7 @@
  
   protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
     if (field == null) {
-      List clauses = new ArrayList();
+      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, inclusive),
             BooleanClause.Occur.SHOULD));
@@ -196,6 +197,7 @@
    * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
    * </code>
    * </pre>
+   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
    * @param queries Queries strings to parse
    * @param fields Fields to search on
    * @param analyzer Analyzer to use
@@ -203,7 +205,7 @@
    * @throws IllegalArgumentException if the length of the queries array differs
    *  from the length of the fields array
    */
-  public static Query parse(String[] queries, String[] fields,
+  public static Query parse(Version matchVersion, String[] queries, String[] fields,
       Analyzer analyzer) throws ParseException
   {
     if (queries.length != fields.length)
@@ -211,7 +213,7 @@
     BooleanQuery bQuery = new BooleanQuery();
     for (int i = 0; i < fields.length; i++)
     {
-      QueryParser qp = new QueryParser(fields[i], analyzer);
+      QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
       Query q = qp.parse(queries[i]);
       if (q!=null && // q never null, just being defensive
           (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {
@@ -243,6 +245,7 @@
    * </code>
    * </pre>
    *
+   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
    * @param query Query string to parse
    * @param fields Fields to search on
    * @param flags Flags describing the fields
@@ -251,13 +254,13 @@
    * @throws IllegalArgumentException if the length of the fields array differs
    *  from the length of the flags array
    */
-  public static Query parse(String query, String[] fields,
+  public static Query parse(Version matchVersion, String query, String[] fields,
       BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
     if (fields.length != flags.length)
       throw new IllegalArgumentException("fields.length != flags.length");
     BooleanQuery bQuery = new BooleanQuery();
     for (int i = 0; i < fields.length; i++) {
-      QueryParser qp = new QueryParser(fields[i], analyzer);
+      QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
       Query q = qp.parse(query);
       if (q!=null && // q never null, just being defensive 
           (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {
@@ -290,6 +293,7 @@
    * </code>
    * </pre>
    *
+   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
    * @param queries Queries string to parse
    * @param fields Fields to search on
    * @param flags Flags describing the fields
@@ -298,7 +302,7 @@
    * @throws IllegalArgumentException if the length of the queries, fields,
    *  and flags array differ
    */
-  public static Query parse(String[] queries, String[] fields, BooleanClause.Occur[] flags,
+  public static Query parse(Version matchVersion, String[] queries, String[] fields, BooleanClause.Occur[] flags,
       Analyzer analyzer) throws ParseException
   {
     if (!(queries.length == fields.length && queries.length == flags.length))
@@ -306,7 +310,7 @@
     BooleanQuery bQuery = new BooleanQuery();
     for (int i = 0; i < fields.length; i++)
     {
-      QueryParser qp = new QueryParser(fields[i], analyzer);
+      QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
       Query q = qp.parse(queries[i]);
       if (q!=null && // q never null, just being defensive
           (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {

Modified: lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java?rev=829454&r1=829453&r2=829454&view=diff
==============================================================================
--- lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java (original)
+++ lucene/java/branches/flex_1458/src/java/org/apache/lucene/queryParser/QueryParser.java Sat Oct 24 21:23:15 2009
@@ -12,7 +12,6 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Vector;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CachingTokenFilter;
@@ -34,7 +33,7 @@
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.util.Parameter;
+import org.apache.lucene.util.Version;
 
 /**
  * This class is generated by JavaCC.  The most important method is
@@ -101,6 +100,14 @@
  * <p><b>NOTE</b>: there is a new QueryParser in contrib, which matches
  * the same syntax as this class, but is more modular,
  * enabling substantial customization to how a query is created.
+ *
+ * <a name="version"/>
+ * <p><b>NOTE</b>: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
+ * <ul>
+ *    <li> As of 2.9, {@link #setEnablePositionIncrements} is true by
+ *         default.
+ * </ul>
  */
 public class QueryParser implements QueryParserConstants {
 
@@ -125,7 +132,7 @@
   boolean lowercaseExpandedTerms = true;
   MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
   boolean allowLeadingWildcard = false;
-  boolean enablePositionIncrements = false;
+  boolean enablePositionIncrements = true;
 
   Analyzer analyzer;
   String field;
@@ -137,7 +144,7 @@
   // the default date resolution
   DateTools.Resolution dateResolution = null;
   // maps field names to date resolutions
-  Map fieldToDateResolution = null;
+  Map<String,DateTools.Resolution> fieldToDateResolution = null;
 
   // The collator to use when determining range inclusion,
   // for use when constructing RangeQuerys.
@@ -146,23 +153,22 @@
   /** The default operator for parsing queries. 
    * Use {@link QueryParser#setDefaultOperator} to change it.
    */
-  static public final class Operator extends Parameter {
-    private Operator(String name) {
-      super(name);
-    }
-    static public final Operator OR = new Operator("OR");
-    static public final Operator AND = new Operator("AND");
-  }
-
+  static public enum Operator { OR, AND }
 
   /** Constructs a query parser.
+   *  @param matchVersion  Lucene version to match.  See <a href="#version">above</a>
    *  @param f  the default field for query terms.
    *  @param a   used to find terms in the query text.
    */
-  public QueryParser(String f, Analyzer a) {
+  public QueryParser(Version matchVersion, String f, Analyzer a) {
     this(new FastCharStream(new StringReader("")));
     analyzer = a;
     field = f;
+    if (matchVersion.onOrAfter(Version.LUCENE_29)) {
+      enablePositionIncrements = true;
+    } else {
+      enablePositionIncrements = false;
+    }
   }
 
   /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
@@ -336,29 +342,6 @@
   }
 
   /**
-   * @deprecated Please use {@link #setMultiTermRewriteMethod} instead.
-   */
-  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
-    if (useOldRangeQuery) {
-      setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
-    } else {
-      setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
-    }
-  }
-
-
-  /**
-   * @deprecated Please use {@link #getMultiTermRewriteMethod} instead.
-   */
-  public boolean getUseOldRangeQuery() {
-    if (getMultiTermRewriteMethod() == MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE) {
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  /**
    * By default QueryParser uses {@link MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
    * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it 
    * a) Runs faster b) Does not have the scarcity of terms unduly influence score 
@@ -418,7 +401,7 @@
 
     if (fieldToDateResolution == null) {
       // lazily initialize HashMap
-      fieldToDateResolution = new HashMap();
+      fieldToDateResolution = new HashMap<String,DateTools.Resolution>();
     }
 
     fieldToDateResolution.put(fieldName, dateResolution);
@@ -440,7 +423,7 @@
       return this.dateResolution;
     }
 
-    DateTools.Resolution resolution = (DateTools.Resolution) fieldToDateResolution.get(fieldName);
+    DateTools.Resolution resolution = fieldToDateResolution.get(fieldName);
     if (resolution == null) {
       // no date resolutions set for the given field; return default date resolution instead
       resolution = this.dateResolution;
@@ -473,20 +456,13 @@
     return rangeCollator;
   }
 
-  /**
-   * @deprecated use {@link #addClause(List, int, int, Query)} instead.
-   */
-  protected void addClause(Vector clauses, int conj, int mods, Query q) {
-    addClause((List) clauses, conj, mods, q);
-  }
-
-  protected void addClause(List clauses, int conj, int mods, Query q) {
+  protected void addClause(List<BooleanClause> clauses, int conj, int mods, Query q) {
     boolean required, prohibited;
 
     // If this term is introduced by AND, make the preceding term required,
     // unless it's already prohibited
     if (clauses.size() > 0 && conj == CONJ_AND) {
-      BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+      BooleanClause c = clauses.get(clauses.size()-1);
       if (!c.isProhibited())
         c.setOccur(BooleanClause.Occur.MUST);
     }
@@ -496,7 +472,7 @@
       // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
       // notice if the input is a OR b, first term is parsed as required; without
       // this modification a OR b would parsed as +a OR b
-      BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+      BooleanClause c = clauses.get(clauses.size()-1);
       if (!c.isProhibited())
         c.setOccur(BooleanClause.Occur.SHOULD);
     }
@@ -635,7 +611,7 @@
           // phrase query:
           MultiPhraseQuery mpq = newMultiPhraseQuery();
           mpq.setSlop(phraseSlop);
-          List multiTerms = new ArrayList();
+          List<Term> multiTerms = new ArrayList<Term>();
           int position = -1;
           for (int i = 0; i < numTokens; i++) {
             String term = null;
@@ -653,9 +629,9 @@
 
             if (positionIncrement > 0 && multiTerms.size() > 0) {
               if (enablePositionIncrements) {
-                mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
+                mpq.add(multiTerms.toArray(new Term[0]),position);
               } else {
-                mpq.add((Term[])multiTerms.toArray(new Term[0]));
+                mpq.add(multiTerms.toArray(new Term[0]));
               }
               multiTerms.clear();
             }
@@ -663,9 +639,9 @@
             multiTerms.add(new Term(field, term));
           }
           if (enablePositionIncrements) {
-            mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
+            mpq.add(multiTerms.toArray(new Term[0]),position);
           } else {
-            mpq.add((Term[])multiTerms.toArray(new Term[0]));
+            mpq.add(multiTerms.toArray(new Term[0]));
           }
           return mpq;
         }
@@ -885,26 +861,8 @@
    *
    * @return Resulting {@link Query} object.
    * @exception ParseException throw in overridden method to disallow
-   * @deprecated use {@link #getBooleanQuery(List)} instead
-   */
-  protected Query getBooleanQuery(Vector clauses) throws ParseException {
-    return getBooleanQuery((List) clauses, false);
-  }
-
-  /**
-   * Factory method for generating query, given a set of clauses.
-   * By default creates a boolean query composed of clauses passed in.
-   *
-   * Can be overridden by extending classes, to modify query being
-   * returned.
-   *
-   * @param clauses List that contains {@link BooleanClause} instances
-   *    to join.
-   *
-   * @return Resulting {@link Query} object.
-   * @exception ParseException throw in overridden method to disallow
    */
-  protected Query getBooleanQuery(List clauses) throws ParseException {
+  protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
     return getBooleanQuery(clauses, false);
   }
 
@@ -921,37 +879,16 @@
    *
    * @return Resulting {@link Query} object.
    * @exception ParseException throw in overridden method to disallow
-   * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
    */
-  protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
-    throws ParseException
-  {
-    return getBooleanQuery((List) clauses, disableCoord);
-  }
-
-  /**
-   * Factory method for generating query, given a set of clauses.
-   * By default creates a boolean query composed of clauses passed in.
-   *
-   * Can be overridden by extending classes, to modify query being
-   * returned.
-   *
-   * @param clauses List that contains {@link BooleanClause} instances
-   *    to join.
-   * @param disableCoord true if coord scoring should be disabled.
-   *
-   * @return Resulting {@link Query} object.
-   * @exception ParseException throw in overridden method to disallow
-   */
-  protected Query getBooleanQuery(List clauses, boolean disableCoord)
+  protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord)
     throws ParseException
   {
     if (clauses.size()==0) {
       return null; // all clause words were filtered away by the analyzer.
     }
     BooleanQuery query = newBooleanQuery(disableCoord);
-    for (int i = 0; i < clauses.size(); i++) {
-      query.add((BooleanClause)clauses.get(i));
+    for(final BooleanClause clause: clauses) {
+      query.add(clause);
     }
     return query;
   }
@@ -1155,7 +1092,7 @@
       System.out.println("Usage: java org.apache.lucene.queryParser.QueryParser <input>");
       System.exit(0);
     }
-    QueryParser qp = new QueryParser("field",
+    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
                            new org.apache.lucene.analysis.SimpleAnalyzer());
     Query q = qp.parse(args[0]);
     System.out.println(q.toString("field"));
@@ -1234,7 +1171,7 @@
   }
 
   final public Query Query(String field) throws ParseException {
-  List clauses = new ArrayList();
+  List<BooleanClause> clauses = new ArrayList<BooleanClause>();
   Query q, firstQuery=null;
   int conj, mods;
     mods = Modifiers();
@@ -1591,12 +1528,6 @@
     finally { jj_save(0, xla); }
   }
 
-  private boolean jj_3R_3() {
-    if (jj_scan_token(STAR)) return true;
-    if (jj_scan_token(COLON)) return true;
-    return false;
-  }
-
   private boolean jj_3R_2() {
     if (jj_scan_token(TERM)) return true;
     if (jj_scan_token(COLON)) return true;
@@ -1613,6 +1544,12 @@
     return false;
   }
 
+  private boolean jj_3R_3() {
+    if (jj_scan_token(STAR)) return true;
+    if (jj_scan_token(COLON)) return true;
+    return false;
+  }
+
   /** Generated Token Manager. */
   public QueryParserTokenManager token_source;
   /** Current token. */
@@ -1641,7 +1578,7 @@
   private int jj_gc = 0;
 
   /** Constructor with user supplied CharStream. */
-  public QueryParser(CharStream stream) {
+  protected QueryParser(CharStream stream) {
     token_source = new QueryParserTokenManager(stream);
     token = new Token();
     jj_ntk = -1;
@@ -1661,7 +1598,7 @@
   }
 
   /** Constructor with generated Token Manager. */
-  public QueryParser(QueryParserTokenManager tm) {
+  protected QueryParser(QueryParserTokenManager tm) {
     token_source = tm;
     token = new Token();
     jj_ntk = -1;



Mime
View raw message