lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From uschind...@apache.org
Subject svn commit: r791173 [2/4] - in /lucene/java/branches/lucene_2_4_back_compat_tests: ./ contrib/highlighter/src/test/org/apache/lucene/search/highlight/ src/java/org/apache/lucene/analysis/ src/java/org/apache/lucene/analysis/standard/ src/java/org/apach...
Date Sat, 04 Jul 2009 20:08:56 GMT
Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/DocInverterPerThread.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/DocInverterPerThread.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/DocInverterPerThread.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/DocInverterPerThread.java Sat Jul  4 20:08:54 2009
@@ -20,14 +20,6 @@
 import java.io.IOException;
 
 import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Attribute;
 
 /** This is a DocFieldConsumer that inverts each field,
  *  separately, from a Document, and accepts a
@@ -38,94 +30,6 @@
   final InvertedDocConsumerPerThread consumer;
   final InvertedDocEndConsumerPerThread endConsumer;
   final Token localToken = new Token();
-  //TODO: change to SingleTokenTokenStream after Token was removed
-  final SingleTokenTokenStream singleTokenTokenStream = new SingleTokenTokenStream();
-  final BackwardsCompatibilityStream localTokenStream = new BackwardsCompatibilityStream();
-  
-  static class SingleTokenTokenStream extends TokenStream {
-    TermAttribute termAttribute;
-    OffsetAttribute offsetAttribute;
-    
-    SingleTokenTokenStream() {
-      termAttribute = (TermAttribute) addAttribute(TermAttribute.class);
-      offsetAttribute = (OffsetAttribute) addAttribute(OffsetAttribute.class);
-    }
-    
-    public void reinit(String stringValue, int startOffset,  int endOffset) {
-      termAttribute.setTermBuffer(stringValue);
-      offsetAttribute.setStartOffset(startOffset);
-      offsetAttribute.setEndOffset(endOffset);
-    }
-  }
-  
-  /** This stream wrapper is only used to maintain backwards compatibility with the
-   *  old TokenStream API and can be removed in Lucene 3.0
-   * @deprecated 
-   */
-  static class BackwardsCompatibilityStream extends TokenStream {
-    private Token token;
-      
-    TermAttribute termAttribute = new TermAttribute() {
-      public String term() {
-        return token.term();
-      }
-      
-      public char[] termBuffer() {
-        return token.termBuffer();
-      }
-      
-      public int termLength() {
-        return token.termLength();
-      }
-    };
-    OffsetAttribute offsetAttribute = new OffsetAttribute() {
-      public int startOffset() {
-        return token.startOffset();
-      }
-
-      public int endOffset() {
-        return token.endOffset();
-      }
-    };
-    
-    PositionIncrementAttribute positionIncrementAttribute = new PositionIncrementAttribute() {
-      public int getPositionIncrement() {
-        return token.getPositionIncrement();
-      }
-    };
-    
-    FlagsAttribute flagsAttribute = new FlagsAttribute() {
-      public int getFlags() {
-        return token.getFlags();
-      }
-    };
-    
-    PayloadAttribute payloadAttribute = new PayloadAttribute() {
-      public Payload getPayload() {
-        return token.getPayload();
-      }
-    };
-    
-    TypeAttribute typeAttribute = new TypeAttribute() {
-      public String type() {
-        return token.type();
-      }
-    };
-    
-    BackwardsCompatibilityStream() {
-      attributes.put(TermAttribute.class, termAttribute);
-      attributes.put(OffsetAttribute.class, offsetAttribute);
-      attributes.put(PositionIncrementAttribute.class, positionIncrementAttribute);
-      attributes.put(FlagsAttribute.class, flagsAttribute);
-      attributes.put(PayloadAttribute.class, payloadAttribute);
-      attributes.put(TypeAttribute.class, typeAttribute);
-    }
-            
-    public void set(Token token) {
-      this.token = token;
-    }
-  };
-  
   final DocumentsWriter.DocState docState;
 
   final FieldInvertState fieldState = new FieldInvertState();

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FieldInvertState.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FieldInvertState.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FieldInvertState.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FieldInvertState.java Sat Jul  4 20:08:54 2009
@@ -17,7 +17,6 @@
 package org.apache.lucene.index;
 
 import org.apache.lucene.search.Similarity;
-import org.apache.lucene.util.AttributeSource;
 
 /**
  * This class tracks the number and position / offset parameters of terms
@@ -33,7 +32,6 @@
   int numOverlap;
   int offset;
   float boost;
-  AttributeSource attributeSource;
 
   public FieldInvertState() {
   }
@@ -56,7 +54,6 @@
     numOverlap = 0;
     offset = 0;
     boost = docBoost;
-    attributeSource = null;
   }
 
   /**
@@ -100,8 +97,4 @@
   public float getBoost() {
     return boost;
   }
-  
-  public AttributeSource getAttributeSource() {
-    return attributeSource;
-  }
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java Sat Jul  4 20:08:54 2009
@@ -19,7 +19,7 @@
 
 import java.io.IOException;
 import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.Token;
 
 // TODO: break into separate freq and prox writers as
 // codecs; make separate container (tii/tis/skip/*) that can
@@ -32,7 +32,6 @@
   final DocumentsWriter.DocState docState;
   final FieldInvertState fieldState;
   boolean omitTf;
-  PayloadAttribute payloadAttribute;
 
   public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo) {
     this.termsHashPerField = termsHashPerField;
@@ -54,7 +53,7 @@
 
   boolean hasPayloads;
 
-  void skippingLongTerm() throws IOException {}
+  void skippingLongTerm(Token t) throws IOException {}
 
   public int compareTo(Object other0) {
     FreqProxTermsWriterPerField other = (FreqProxTermsWriterPerField) other0;
@@ -65,7 +64,6 @@
     // Record, up front, whether our in-RAM format will be
     // with or without term freqs:
     omitTf = fieldInfo.omitTf;
-    payloadAttribute = null;
   }
 
   boolean start(Fieldable[] fields, int count) {
@@ -74,23 +72,9 @@
         return true;
     return false;
   }     
-  
-  void start(Fieldable f) {
-    if (fieldState.attributeSource.hasAttribute(PayloadAttribute.class)) {
-      payloadAttribute = (PayloadAttribute) fieldState.attributeSource.getAttribute(PayloadAttribute.class);
-    } else {
-      payloadAttribute = null;
-    }
-  }
 
-  final void writeProx(FreqProxTermsWriter.PostingList p, int proxCode) {
-    final Payload payload;
-    if (payloadAttribute == null) {
-      payload = null;
-    } else {
-      payload = payloadAttribute.getPayload();
-    }
-    
+  final void writeProx(Token t, FreqProxTermsWriter.PostingList p, int proxCode) {
+    final Payload payload = t.getPayload();    
     if (payload != null && payload.length > 0) {
       termsHashPerField.writeVInt(1, (proxCode<<1)|1);
       termsHashPerField.writeVInt(1, payload.length);
@@ -101,7 +85,7 @@
     p.lastPosition = fieldState.position;
   }
 
-  final void newTerm(RawPostingList p0) {
+  final void newTerm(Token t, RawPostingList p0) {
     // First time we're seeing this term since the last
     // flush
     assert docState.testPoint("FreqProxTermsWriterPerField.newTerm start");
@@ -112,11 +96,11 @@
     } else {
       p.lastDocCode = docState.docID << 1;
       p.docFreq = 1;
-      writeProx(p, fieldState.position);
+      writeProx(t, p, fieldState.position);
     }
   }
 
-  final void addTerm(RawPostingList p0) {
+  final void addTerm(Token t, RawPostingList p0) {
 
     assert docState.testPoint("FreqProxTermsWriterPerField.addTerm start");
 
@@ -148,10 +132,10 @@
         p.docFreq = 1;
         p.lastDocCode = (docState.docID - p.lastDocID) << 1;
         p.lastDocID = docState.docID;
-        writeProx(p, fieldState.position);
+        writeProx(t, p, fieldState.position);
       } else {
         p.docFreq++;
-        writeProx(p, fieldState.position-p.lastPosition);
+        writeProx(t, p, fieldState.position-p.lastPosition);
       }
     }
   }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java Sat Jul  4 20:08:54 2009
@@ -17,9 +17,9 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.analysis.Token;
+import java.io.IOException;
 
 abstract class InvertedDocConsumerPerField {
 
@@ -29,11 +29,8 @@
   // fields:
   abstract boolean start(Fieldable[] fields, int count) throws IOException;
 
-  // Called before a field instance is being processed
-  abstract void start(Fieldable field);
-  
   // Called once per inverted token
-  abstract void add() throws IOException;
+  abstract void add(Token token) throws IOException;
 
   // Called once per field per document, after all Fieldable
   // occurrences are inverted

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/Payload.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/Payload.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/Payload.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/Payload.java Sat Jul  4 20:08:54 2009
@@ -19,6 +19,7 @@
 
 import java.io.Serializable;
 
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.ArrayUtil;
 
@@ -28,7 +29,7 @@
  *  specific term.  
  *  <p>
  *  To store payloads in the index a {@link TokenStream} has to be used that
- *  produces payload data.
+ *  produces {@link Token}s containing payload data.
  *  <p>
  *  Use {@link TermPositions#getPayloadLength()} and {@link TermPositions#getPayload(byte[], int)}
  *  to retrieve the payloads from the index.<br>

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java Sat Jul  4 20:08:54 2009
@@ -18,11 +18,10 @@
  */
 
 import java.io.IOException;
-
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.UnicodeUtil;
 
 final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {
 
@@ -38,8 +37,7 @@
   boolean doVectorOffsets;
 
   int maxNumPostings;
-  OffsetAttribute offsetAttribute = null;
-  
+
   public TermVectorsTermsWriterPerField(TermsHashPerField termsHashPerField, TermVectorsTermsWriterPerThread perThread, FieldInfo fieldInfo) {
     this.termsHashPerField = termsHashPerField;
     this.perThread = perThread;
@@ -193,16 +191,8 @@
     termsHashPerField.shrinkHash(maxNumPostings);
     maxNumPostings = 0;
   }
-  
-  void start(Fieldable f) {
-    if (doVectorOffsets && fieldState.attributeSource.hasAttribute(OffsetAttribute.class)) {
-      offsetAttribute = (OffsetAttribute) fieldState.attributeSource.getAttribute(OffsetAttribute.class);
-    } else {
-      offsetAttribute = null;
-    }
-  }
 
-  void newTerm(RawPostingList p0) {
+  void newTerm(Token t, RawPostingList p0) {
 
     assert docState.testPoint("TermVectorsTermsWriterPerField.newTerm start");
 
@@ -211,9 +201,8 @@
     p.freq = 1;
 
     if (doVectorOffsets) {
-      int startOffset = fieldState.offset + offsetAttribute.startOffset();;
-      int endOffset = fieldState.offset + offsetAttribute.endOffset();
-      
+      final int startOffset = fieldState.offset + t.startOffset();
+      final int endOffset = fieldState.offset + t.endOffset();
       termsHashPerField.writeVInt(1, startOffset);
       termsHashPerField.writeVInt(1, endOffset - startOffset);
       p.lastOffset = endOffset;
@@ -225,7 +214,7 @@
     }
   }
 
-  void addTerm(RawPostingList p0) {
+  void addTerm(Token t, RawPostingList p0) {
 
     assert docState.testPoint("TermVectorsTermsWriterPerField.addTerm start");
 
@@ -233,9 +222,8 @@
     p.freq++;
 
     if (doVectorOffsets) {
-      int startOffset = fieldState.offset + offsetAttribute.startOffset();;
-      int endOffset = fieldState.offset + offsetAttribute.endOffset();
-      
+      final int startOffset = fieldState.offset + t.startOffset();
+      final int endOffset = fieldState.offset + t.endOffset();
       termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
       termsHashPerField.writeVInt(1, endOffset - startOffset);
       p.lastOffset = endOffset;
@@ -247,5 +235,5 @@
     }
   }
 
-  void skippingLongTerm() {}
+  void skippingLongTerm(Token t) {}
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashConsumerPerField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashConsumerPerField.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashConsumerPerField.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashConsumerPerField.java Sat Jul  4 20:08:54 2009
@@ -23,15 +23,14 @@
  *  multiple streams for each unique Token. */
 
 import java.io.IOException;
-
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.analysis.Token;
 
 abstract class TermsHashConsumerPerField {
   abstract boolean start(Fieldable[] fields, int count) throws IOException;
   abstract void finish() throws IOException;
-  abstract void skippingLongTerm() throws IOException;
-  abstract void start(Fieldable field);
-  abstract void newTerm(RawPostingList p) throws IOException;
-  abstract void addTerm(RawPostingList p) throws IOException;
+  abstract void skippingLongTerm(Token t) throws IOException;
+  abstract void newTerm(Token t, RawPostingList p) throws IOException;
+  abstract void addTerm(Token t, RawPostingList p) throws IOException;
   abstract int getStreamCount();
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashPerField.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashPerField.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashPerField.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/index/TermsHashPerField.java Sat Jul  4 20:08:54 2009
@@ -20,8 +20,8 @@
 import java.io.IOException;
 import java.util.Arrays;
 
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.analysis.Token;
 import org.apache.lucene.util.UnicodeUtil;
 
 final class TermsHashPerField extends InvertedDocConsumerPerField {
@@ -31,8 +31,7 @@
   final TermsHashPerThread perThread;
   final DocumentsWriter.DocState docState;
   final FieldInvertState fieldState;
-  TermAttribute termAtt;
-  
+
   // Copied from our perThread
   final CharBlockPool charPool;
   final IntBlockPool intPool;
@@ -50,7 +49,7 @@
   private int postingsHashMask = postingsHashSize-1;
   private RawPostingList[] postingsHash = new RawPostingList[postingsHashSize];
   private RawPostingList p;
-  
+
   public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
     this.perThread = perThread;
     intPool = perThread.intPool;
@@ -248,14 +247,6 @@
   private boolean doCall;
   private boolean doNextCall;
 
-  void start(Fieldable f) {
-    termAtt = (TermAttribute) fieldState.attributeSource.getAttribute(TermAttribute.class);
-    consumer.start(f);
-    if (nextPerField != null) {
-      nextPerField.start(f);
-    }
-  }
-  
   boolean start(Fieldable[] fields, int count) throws IOException {
     doCall = consumer.start(fields, count);
     if (nextPerField != null)
@@ -266,7 +257,7 @@
   // Secondary entry point (for 2nd & subsequent TermsHash),
   // because token text has already been "interned" into
   // textStart, so we hash by textStart
-  public void add(int textStart) throws IOException {
+  public void add(Token token, int textStart) throws IOException {
 
     int code = textStart;
 
@@ -329,17 +320,17 @@
       }
       p.byteStart = intUptos[intUptoStart];
 
-      consumer.newTerm(p);
+      consumer.newTerm(token, p);
 
     } else {
       intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
       intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
-      consumer.addTerm(p);
+      consumer.addTerm(token, p);
     }
   }
 
   // Primary entry point (for first TermsHash)
-  void add() throws IOException {
+  void add(Token token) throws IOException {
 
     assert !postingsCompacted;
 
@@ -347,8 +338,8 @@
     // term text into textStart address
 
     // Get the text of this term.
-    final char[] tokenText = termAtt.termBuffer();;
-    final int tokenTextLen = termAtt.termLength();
+    final char[] tokenText = token.termBuffer();
+    final int tokenTextLen = token.termLength();
 
     // Compute hashcode & replace any invalid UTF16 sequences
     int downto = tokenTextLen;
@@ -412,7 +403,7 @@
           if (docState.maxTermPrefix == null)
             docState.maxTermPrefix = new String(tokenText, 0, 30);
 
-          consumer.skippingLongTerm();
+          consumer.skippingLongTerm(token);
           return;
         }
         charPool.nextBuffer();
@@ -459,16 +450,16 @@
       }
       p.byteStart = intUptos[intUptoStart];
 
-      consumer.newTerm(p);
+      consumer.newTerm(token, p);
 
     } else {
       intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
       intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
-      consumer.addTerm(p);
+      consumer.addTerm(token, p);
     }
 
     if (doNextCall)
-      nextPerField.add(p.textStart);
+      nextPerField.add(token, p.textStart);
   }
 
   int[] intUptos;

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/CharStream.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/CharStream.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/CharStream.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/CharStream.java Sat Jul  4 20:08:54 2009
@@ -109,4 +109,4 @@
   void Done();
 
 }
-/* JavaCC - OriginalChecksum=a83909a2403f969f94d18375f9f143e4 (do not edit this line) */
+/* JavaCC - OriginalChecksum=32a89423891f765dde472f7ef0e3ef7b (do not edit this line) */

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/ParseException.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/ParseException.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/ParseException.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/ParseException.java Sat Jul  4 20:08:54 2009
@@ -195,4 +195,4 @@
    }
 
 }
-/* JavaCC - OriginalChecksum=c63b396885c4ff44d7aa48d3feae60cd (do not edit this line) */
+/* JavaCC - OriginalChecksum=c7631a240f7446940695eac31d9483ca (do not edit this line) */

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.java Sat Jul  4 20:08:54 2009
@@ -3,8 +3,8 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.text.Collator;
 import java.text.DateFormat;
+import java.text.Collator;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Date;
@@ -15,10 +15,7 @@
 import java.util.Vector;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.Term;
@@ -97,6 +94,7 @@
  * </p>
  *
  * <p>Note that QueryParser is <em>not</em> thread-safe.</p>
+ *
  */
 public class QueryParser implements QueryParserConstants {
 
@@ -119,7 +117,7 @@
   private Operator operator = OR_OPERATOR;
 
   boolean lowercaseExpandedTerms = true;
-  boolean constantScoreRewrite= true;
+  boolean useOldRangeQuery= false;
   boolean allowLeadingWildcard = false;
   boolean enablePositionIncrements = false;
 
@@ -136,7 +134,7 @@
   Map fieldToDateResolution = null;
 
   // The collator to use when determining range inclusion,
-  // for use when constructing RangeQuerys.
+  // for use when constructing RangeQuerys and ConstantScoreRangeQuerys.
   Collator rangeCollator = null;
 
   /** The default operator for parsing queries. 
@@ -326,40 +324,24 @@
   }
 
   /**
-   * @deprecated Please use {@link #setConstantScoreRewrite} instead.
-   */
-  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
-    constantScoreRewrite = !useOldRangeQuery;
-  }
-
-
-  /**
-   * @deprecated Please use {@link #getConstantScoreRewrite} instead.
-   */
-  public boolean getUseOldRangeQuery() {
-    return !constantScoreRewrite;
-  }
-
-  /**
-   * By default QueryParser uses constant-score rewriting
-   * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it 
-   * a) Runs faster b) Does not have the scarcity of terms unduly influence score 
+   * By default QueryParser uses new ConstantScoreRangeQuery in preference to RangeQuery
+   * for range queries. This implementation is generally preferable because it 
+   * a) Runs faster b) Does not have the scarcity of range terms unduly influence score 
    * c) avoids any "TooManyBooleanClauses" exception.
-   * However, if your application really needs to use the
-   * old-fashioned BooleanQuery expansion rewriting and the above
-   * points are not relevant then set this option to <code>true</code>
+   * However, if your application really needs to use the old-fashioned RangeQuery and the above
+   * points are not required then set this option to <code>true</code>
    * Default is <code>false</code>.
    */
-  public void setConstantScoreRewrite(boolean v) {
-    constantScoreRewrite = v;
+  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
+    this.useOldRangeQuery = useOldRangeQuery;
   }
 
 
   /**
-   * @see #setConstantScoreRewrite(boolean)
+   * @see #setUseOldRangeQuery(boolean)
    */
-  public boolean getConstantScoreRewrite() {
-    return constantScoreRewrite;
+  public boolean getUseOldRangeQuery() {
+    return useOldRangeQuery;
   }
 
   /**
@@ -433,7 +415,9 @@
 
   /** 
    * Sets the collator used to determine index term inclusion in ranges
-   * for RangeQuerys.
+   * specified either for ConstantScoreRangeQuerys or RangeQuerys (if
+   * {@link #setUseOldRangeQuery(boolean)} is called with a <code>true</code>
+   * value.)
    * <p/>
    * <strong>WARNING:</strong> Setting the rangeCollator to a non-null
    * collator using this method will cause every single index Term in the
@@ -442,6 +426,7 @@
    * be very slow.
    *
    *  @param rc  the collator to use when constructing RangeQuerys
+   *             and ConstantScoreRangeQuerys
    */
   public void setRangeCollator(Collator rc) {
     rangeCollator = rc;
@@ -449,7 +434,9 @@
 
   /**
    * @return the collator used to determine index term inclusion in ranges
-   * for RangeQuerys.
+   *  specified either for ConstantScoreRangeQuerys or RangeQuerys (if
+   *  {@link #setUseOldRangeQuery(boolean)} is called with a <code>true</code>
+   *  value.)
    */
   public Collator getRangeCollator() {
     return rangeCollator;
@@ -521,126 +508,48 @@
     // PhraseQuery, or nothing based on the term count
 
     TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
-    CachingTokenFilter buffer = new CachingTokenFilter(source);
-    TermAttribute termAtt = null;
-    PositionIncrementAttribute posIncrAtt = null;
-    int numTokens = 0;
-
-    org.apache.lucene.analysis.Token reusableToken = null;
-    org.apache.lucene.analysis.Token nextToken = null;
-
-
-    boolean useNewAPI = TokenStream.useNewAPIDefault();
-
-    if (useNewAPI) {
-      boolean success = false;
-      try {
-        buffer.reset();
-        success = true;
-      } catch (IOException e) {
-        // success==false if we hit an exception
-      }
-      if (success) {
-        if (buffer.hasAttribute(TermAttribute.class)) {
-          termAtt = (TermAttribute) buffer.getAttribute(TermAttribute.class);
-        }
-        if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-          posIncrAtt = (PositionIncrementAttribute) buffer.getAttribute(PositionIncrementAttribute.class);
-        }
-      }
-    } else {
-      reusableToken = new org.apache.lucene.analysis.Token();
-    }
-
+    List list = new ArrayList();
+    final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
+    org.apache.lucene.analysis.Token nextToken;
     int positionCount = 0;
     boolean severalTokensAtSamePosition = false;
 
-    if (useNewAPI) {
-      if (termAtt != null) {
-        try {
-          while (buffer.incrementToken()) {
-            numTokens++;
-            int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
-            if (positionIncrement != 0) {
-              positionCount += positionIncrement;
-            } else {
-              severalTokensAtSamePosition = true;
-            }
-          }
-        } catch (IOException e) {
-          // ignore
-        }
+    while (true) {
+      try {
+        nextToken = source.next(reusableToken);
       }
-    } else {
-      while (true) {
-        try {
-          nextToken = buffer.next(reusableToken);
-        }
-        catch (IOException e) {
-          nextToken = null;
-        }
-        if (nextToken == null)
-          break;
-        numTokens++;
-        if (nextToken.getPositionIncrement() != 0)
-          positionCount += nextToken.getPositionIncrement();
-        else
-          severalTokensAtSamePosition = true;
+      catch (IOException e) {
+        nextToken = null;
       }
+      if (nextToken == null)
+        break;
+      list.add(nextToken.clone());
+      if (nextToken.getPositionIncrement() != 0)
+        positionCount += nextToken.getPositionIncrement();
+      else
+        severalTokensAtSamePosition = true;
     }
     try {
-      // rewind the buffer stream
-      buffer.reset();
-
-      // close original stream - all tokens buffered
       source.close();
     }
     catch (IOException e) {
       // ignore
     }
 
-    if (numTokens == 0)
+    if (list.size() == 0)
       return null;
-    else if (numTokens == 1) {
-      String term = null;
-      try {
-
-        if (useNewAPI) {
-          boolean hasNext = buffer.incrementToken();
-          assert hasNext == true;
-          term = termAtt.term();
-        } else {
-          nextToken = buffer.next(reusableToken);
-          assert nextToken != null;
-          term = nextToken.term();
-        }
-      } catch (IOException e) {
-        // safe to ignore, because we know the number of tokens
-      }
-      return newTermQuery(new Term(field, term));
+    else if (list.size() == 1) {
+      nextToken = (org.apache.lucene.analysis.Token) list.get(0);
+      return newTermQuery(new Term(field, nextToken.term()));
     } else {
       if (severalTokensAtSamePosition) {
         if (positionCount == 1) {
           // no phrase query:
           BooleanQuery q = newBooleanQuery(true);
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            try {
-              if (useNewAPI) {
-                boolean hasNext = buffer.incrementToken();
-                assert hasNext == true;
-                term = termAtt.term();
-              } else {
-                nextToken = buffer.next(reusableToken);
-                assert nextToken != null;
-                term = nextToken.term();
-              }
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
+          for (int i = 0; i < list.size(); i++) {
+            nextToken = (org.apache.lucene.analysis.Token) list.get(i);
             Query currentQuery = newTermQuery(
-                new Term(field, term));
+                new Term(field, nextToken.term()));
             q.add(currentQuery, BooleanClause.Occur.SHOULD);
           }
           return q;
@@ -651,28 +560,9 @@
           mpq.setSlop(phraseSlop);
           List multiTerms = new ArrayList();
           int position = -1;
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            int positionIncrement = 1;
-            try {
-              if (useNewAPI) {
-                boolean hasNext = buffer.incrementToken();
-                assert hasNext == true;
-                term = termAtt.term();
-                if (posIncrAtt != null) {
-                  positionIncrement = posIncrAtt.getPositionIncrement();
-                }
-              } else {
-                nextToken = buffer.next(reusableToken);
-                assert nextToken != null;
-                term = nextToken.term();
-                positionIncrement = nextToken.getPositionIncrement();
-              }
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
-            if (positionIncrement > 0 && multiTerms.size() > 0) {
+          for (int i = 0; i < list.size(); i++) {
+            nextToken = (org.apache.lucene.analysis.Token) list.get(i);
+            if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
               if (enablePositionIncrements) {
                 mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
               } else {
@@ -680,8 +570,8 @@
               }
               multiTerms.clear();
             }
-            position += positionIncrement;
-            multiTerms.add(new Term(field, term));
+            position += nextToken.getPositionIncrement();
+            multiTerms.add(new Term(field, nextToken.term()));
           }
           if (enablePositionIncrements) {
             mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@@ -695,36 +585,13 @@
         PhraseQuery pq = newPhraseQuery();
         pq.setSlop(phraseSlop);
         int position = -1;
-
-
-        for (int i = 0; i < numTokens; i++) {
-          String term = null;
-          int positionIncrement = 1;
-
-          try {
-            if (useNewAPI) {
-
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              term = termAtt.term();
-              if (posIncrAtt != null) {
-                positionIncrement = posIncrAtt.getPositionIncrement();
-              }
-            } else {
-              nextToken = buffer.next(reusableToken);
-              assert nextToken != null;
-              term = nextToken.term();
-              positionIncrement = nextToken.getPositionIncrement();
-            }
-          } catch (IOException e) {
-            // safe to ignore, because we know the number of tokens
-          }
-
+        for (int i = 0; i < list.size(); i++) {
+          nextToken = (org.apache.lucene.analysis.Token) list.get(i);
           if (enablePositionIncrements) {
-            position += positionIncrement;
-            pq.add(new Term(field, term),position);
+            position += nextToken.getPositionIncrement();
+            pq.add(new Term(field, nextToken.term()),position);
           } else {
-            pq.add(new Term(field, term));
+            pq.add(new Term(field, nextToken.term()));
           }
         }
         return pq;
@@ -733,7 +600,6 @@
   }
 
 
-
   /**
    * Base implementation delegates to {@link #getFieldQuery(String,String)}.
    * This method may be overridden, for example, to return
@@ -852,9 +718,7 @@
    * @return new PrefixQuery instance
    */
   protected Query newPrefixQuery(Term prefix){
-    PrefixQuery query = new PrefixQuery(prefix);
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
+    return new PrefixQuery(prefix);
   }
 
   /**
@@ -865,7 +729,6 @@
    * @return new FuzzyQuery Instance
    */
   protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
-    // FuzzyQuery doesn't yet allow constant score rewrite
     return new FuzzyQuery(term,minimumSimilarity,prefixLength);
   }
 
@@ -878,16 +741,17 @@
    * @return new RangeQuery instance
    */
   protected Query newRangeQuery(String field, String part1, String part2, boolean inclusive) {
-    RangeQuery query;
-
-    if (constantScoreRewrite) {
-      // TODO: remove in Lucene 3.0
-      query = new ConstantScoreRangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
-    } else {
-      query = new RangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
+    if(useOldRangeQuery)
+    {
+      return new RangeQuery(new Term(field, part1),
+                            new Term(field, part2),
+                            inclusive, rangeCollator);
+    }
+    else
+    {
+      return new ConstantScoreRangeQuery
+        (field, part1, part2, inclusive, inclusive, rangeCollator);
     }
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
   }
 
   /**
@@ -904,9 +768,7 @@
    * @return new WildcardQuery instance
    */
   protected Query newWildcardQuery(Term t) {
-    WildcardQuery query = new WildcardQuery(t);
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
+    return new WildcardQuery(t);
   }
 
   /**
@@ -1383,6 +1245,7 @@
   boolean prefix = false;
   boolean wildcard = false;
   boolean fuzzy = false;
+  boolean rangein = false;
   Query q;
     switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
     case STAR:
@@ -1627,6 +1490,12 @@
     finally { jj_save(0, xla); }
   }
 
+  private boolean jj_3R_3() {
+    if (jj_scan_token(STAR)) return true;
+    if (jj_scan_token(COLON)) return true;
+    return false;
+  }
+
   private boolean jj_3R_2() {
     if (jj_scan_token(TERM)) return true;
     if (jj_scan_token(COLON)) return true;
@@ -1643,12 +1512,6 @@
     return false;
   }
 
-  private boolean jj_3R_3() {
-    if (jj_scan_token(STAR)) return true;
-    if (jj_scan_token(COLON)) return true;
-    return false;
-  }
-
   /** Generated Token Manager. */
   public QueryParserTokenManager token_source;
   /** Current token. */

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.jj
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.jj?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.jj (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParser.jj Sat Jul  4 20:08:54 2009
@@ -27,8 +27,8 @@
 
 import java.io.IOException;
 import java.io.StringReader;
-import java.text.Collator;
 import java.text.DateFormat;
+import java.text.Collator;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Date;
@@ -39,10 +39,7 @@
 import java.util.Vector;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.Term;
@@ -121,6 +118,10 @@
  * </p>
  *
  * <p>Note that QueryParser is <em>not</em> thread-safe.</p>
+ *
+ * @author Brian Goetz
+ * @author Peter Halacsy
+ * @author Tatu Saloranta
  */
 public class QueryParser {
 
@@ -143,7 +144,7 @@
   private Operator operator = OR_OPERATOR;
 
   boolean lowercaseExpandedTerms = true;
-  boolean constantScoreRewrite= true;
+  boolean useOldRangeQuery= false;
   boolean allowLeadingWildcard = false;
   boolean enablePositionIncrements = false;
 
@@ -160,7 +161,7 @@
   Map fieldToDateResolution = null;
 
   // The collator to use when determining range inclusion,
-  // for use when constructing RangeQuerys.
+  // for use when constructing RangeQuerys and ConstantScoreRangeQuerys.
   Collator rangeCollator = null;
 
   /** The default operator for parsing queries. 
@@ -350,40 +351,24 @@
   }
 
   /**
-   * @deprecated Please use {@link #setConstantScoreRewrite} instead.
-   */
-  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
-    constantScoreRewrite = !useOldRangeQuery;
-  }
-
-
-  /**
-   * @deprecated Please use {@link #getConstantScoreRewrite} instead.
-   */
-  public boolean getUseOldRangeQuery() {
-    return !constantScoreRewrite;
-  }
-
-  /**
-   * By default QueryParser uses constant-score rewriting
-   * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it 
-   * a) Runs faster b) Does not have the scarcity of terms unduly influence score 
+   * By default QueryParser uses new ConstantScoreRangeQuery in preference to RangeQuery
+   * for range queries. This implementation is generally preferable because it 
+   * a) Runs faster b) Does not have the scarcity of range terms unduly influence score 
    * c) avoids any "TooManyBooleanClauses" exception.
-   * However, if your application really needs to use the
-   * old-fashioned BooleanQuery expansion rewriting and the above
-   * points are not relevant then set this option to <code>true</code>
+   * However, if your application really needs to use the old-fashioned RangeQuery and the above
+   * points are not required then set this option to <code>true</code>
    * Default is <code>false</code>.
    */
-  public void setConstantScoreRewrite(boolean v) {
-    constantScoreRewrite = v;
+  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
+    this.useOldRangeQuery = useOldRangeQuery;
   }
 
 
   /**
-   * @see #setConstantScoreRewrite(boolean)
+   * @see #setUseOldRangeQuery(boolean)
    */
-  public boolean getConstantScoreRewrite() {
-    return constantScoreRewrite;
+  public boolean getUseOldRangeQuery() {
+    return useOldRangeQuery;
   }
 
   /**
@@ -457,7 +442,9 @@
   
   /** 
    * Sets the collator used to determine index term inclusion in ranges
-   * for RangeQuerys.
+   * specified either for ConstantScoreRangeQuerys or RangeQuerys (if
+   * {@link #setUseOldRangeQuery(boolean)} is called with a <code>true</code>
+   * value.)
    * <p/>
    * <strong>WARNING:</strong> Setting the rangeCollator to a non-null
    * collator using this method will cause every single index Term in the
@@ -466,6 +453,7 @@
    * be very slow.
    *
    *  @param rc  the collator to use when constructing RangeQuerys
+   *             and ConstantScoreRangeQuerys
    */
   public void setRangeCollator(Collator rc) {
     rangeCollator = rc;
@@ -473,7 +461,9 @@
   
   /**
    * @return the collator used to determine index term inclusion in ranges
-   * for RangeQuerys.
+   *  specified either for ConstantScoreRangeQuerys or RangeQuerys (if
+   *  {@link #setUseOldRangeQuery(boolean)} is called with a <code>true</code>
+   *  value.)
    */
   public Collator getRangeCollator() {
     return rangeCollator;
@@ -545,126 +535,48 @@
     // PhraseQuery, or nothing based on the term count
 
     TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
-    CachingTokenFilter buffer = new CachingTokenFilter(source);
-    TermAttribute termAtt = null;
-    PositionIncrementAttribute posIncrAtt = null;
-    int numTokens = 0;
-
-    org.apache.lucene.analysis.Token reusableToken = null;
-    org.apache.lucene.analysis.Token nextToken = null;
-
-    
-    boolean useNewAPI = TokenStream.useNewAPI();
-    
-    if (useNewAPI) {
-      boolean success = false;
-      try {
-        buffer.start();
-        success = true;
-      } catch (IOException e) {
-        // success==false if we hit an exception
-      }
-      if (success) {
-    	if (buffer.hasAttribute(TermAttribute.class)) {
-    	  termAtt = (TermAttribute) buffer.getAttribute(TermAttribute.class);
-    	}
-        if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-          posIncrAtt = (PositionIncrementAttribute) buffer.getAttribute(PositionIncrementAttribute.class);
-        }
-      }
-    } else {
-      reusableToken = new org.apache.lucene.analysis.Token();      
-    }
-    
+    List list = new ArrayList();
+    final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
+    org.apache.lucene.analysis.Token nextToken;
     int positionCount = 0;
     boolean severalTokensAtSamePosition = false;
 
-    if (useNewAPI) {
-      if (termAtt != null) {
-        try {
-          while (buffer.incrementToken()) {
-            numTokens++;
-            int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
-            if (positionIncrement != 0) {
-              positionCount += positionIncrement;
-            } else {
-              severalTokensAtSamePosition = true;
-            }
-          }
-        } catch (IOException e) {
-          // ignore
-        }
+    while (true) {
+      try {
+        nextToken = source.next(reusableToken);
       }
-    } else {
-      while (true) {
-        try {
-          nextToken = buffer.next(reusableToken);
-        }
-        catch (IOException e) {
-          nextToken = null;
-        }
-        if (nextToken == null)
-          break;
-        numTokens++;
-        if (nextToken.getPositionIncrement() != 0)
-          positionCount += nextToken.getPositionIncrement();
-        else
-          severalTokensAtSamePosition = true;
-      }      
+      catch (IOException e) {
+        nextToken = null;
+      }
+      if (nextToken == null)
+        break;
+      list.add(nextToken.clone());
+      if (nextToken.getPositionIncrement() != 0)
+        positionCount += nextToken.getPositionIncrement();
+      else
+        severalTokensAtSamePosition = true;
     }
     try {
-      // rewind the buffer stream
-      buffer.reset();
-
-      // close original stream - all tokens buffered
       source.close();
     }
     catch (IOException e) {
       // ignore
     }
-    
-    if (numTokens == 0)
-      return null;
-    else if (numTokens == 1) {
-      String term = null;
-      try {
 
-        if (useNewAPI) {
-          boolean hasNext = buffer.incrementToken();
-          assert hasNext == true;
-          term = termAtt.term();
-        } else {
-          nextToken = buffer.next(reusableToken);
-          assert nextToken != null;
-          term = nextToken.term();
-        }
-      } catch (IOException e) {
-        // safe to ignore, because we know the number of tokens
-      }
-      return newTermQuery(new Term(field, term));
+    if (list.size() == 0)
+      return null;
+    else if (list.size() == 1) {
+      nextToken = (org.apache.lucene.analysis.Token) list.get(0);
+      return newTermQuery(new Term(field, nextToken.term()));
     } else {
       if (severalTokensAtSamePosition) {
         if (positionCount == 1) {
           // no phrase query:
           BooleanQuery q = newBooleanQuery(true);
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            try {
-              if (useNewAPI) {
-                boolean hasNext = buffer.incrementToken();
-                assert hasNext == true;
-                term = termAtt.term();
-              } else {
-                nextToken = buffer.next(reusableToken);
-                assert nextToken != null;
-                term = nextToken.term();
-              }            
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-            
+          for (int i = 0; i < list.size(); i++) {
+            nextToken = (org.apache.lucene.analysis.Token) list.get(i);
             Query currentQuery = newTermQuery(
-                new Term(field, term));
+                new Term(field, nextToken.term()));
             q.add(currentQuery, BooleanClause.Occur.SHOULD);
           }
           return q;
@@ -675,28 +587,9 @@
           mpq.setSlop(phraseSlop);
           List multiTerms = new ArrayList();
           int position = -1;
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            int positionIncrement = 1;
-            try {
-              if (useNewAPI) {
-                boolean hasNext = buffer.incrementToken();
-                assert hasNext == true;
-                term = termAtt.term();
-                if (posIncrAtt != null) {
-                  positionIncrement = posIncrAtt.getPositionIncrement();
-                }
-              } else {
-                nextToken = buffer.next(reusableToken);
-                assert nextToken != null;
-                term = nextToken.term();
-                positionIncrement = nextToken.getPositionIncrement();
-              }
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
-            }
-
-            if (positionIncrement > 0 && multiTerms.size() > 0) {
+          for (int i = 0; i < list.size(); i++) {
+            nextToken = (org.apache.lucene.analysis.Token) list.get(i);
+            if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
               if (enablePositionIncrements) {
                 mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
               } else {
@@ -704,8 +597,8 @@
               }
               multiTerms.clear();
             }
-            position += positionIncrement;
-            multiTerms.add(new Term(field, term));
+            position += nextToken.getPositionIncrement();
+            multiTerms.add(new Term(field, nextToken.term()));
           }
           if (enablePositionIncrements) {
             mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@@ -719,36 +612,13 @@
         PhraseQuery pq = newPhraseQuery();
         pq.setSlop(phraseSlop);
         int position = -1;
-        
-        
-        for (int i = 0; i < numTokens; i++) {
-          String term = null;
-          int positionIncrement = 1;
-
-          try {  
-            if (useNewAPI) {
-              
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              term = termAtt.term();
-              if (posIncrAtt != null) {
-                positionIncrement = posIncrAtt.getPositionIncrement();
-              }
-            } else {
-              nextToken = buffer.next(reusableToken);
-              assert nextToken != null;
-              term = nextToken.term();
-              positionIncrement = nextToken.getPositionIncrement();
-            }
-          } catch (IOException e) {
-            // safe to ignore, because we know the number of tokens
-          }
-
+        for (int i = 0; i < list.size(); i++) {
+          nextToken = (org.apache.lucene.analysis.Token) list.get(i);
           if (enablePositionIncrements) {
-            position += positionIncrement;
-            pq.add(new Term(field, term),position);
+            position += nextToken.getPositionIncrement();
+            pq.add(new Term(field, nextToken.term()),position);
           } else {
-            pq.add(new Term(field, term));
+            pq.add(new Term(field, nextToken.term()));
           }
         }
         return pq;
@@ -757,7 +627,6 @@
   }
 
 
-
   /**
    * Base implementation delegates to {@link #getFieldQuery(String,String)}.
    * This method may be overridden, for example, to return
@@ -876,9 +745,7 @@
    * @return new PrefixQuery instance
    */
   protected Query newPrefixQuery(Term prefix){
-    PrefixQuery query = new PrefixQuery(prefix);
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
+    return new PrefixQuery(prefix);
   }
  
   /**
@@ -889,7 +756,6 @@
    * @return new FuzzyQuery Instance
    */
   protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
-    // FuzzyQuery doesn't yet allow constant score rewrite
     return new FuzzyQuery(term,minimumSimilarity,prefixLength);
   }
 
@@ -902,16 +768,17 @@
    * @return new RangeQuery instance
    */
   protected Query newRangeQuery(String field, String part1, String part2, boolean inclusive) {
-    RangeQuery query;  
-  
-    if (constantScoreRewrite) {
-      // TODO: remove in Lucene 3.0
-      query = new ConstantScoreRangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
-    } else {
-      query = new RangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
+    if(useOldRangeQuery)
+    {
+      return new RangeQuery(new Term(field, part1),
+                            new Term(field, part2),
+                            inclusive, rangeCollator);
+    }
+    else
+    {
+      return new ConstantScoreRangeQuery
+        (field, part1, part2, inclusive, inclusive, rangeCollator);
     }
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
   }
   
   /**
@@ -928,9 +795,7 @@
    * @return new WildcardQuery instance
    */
   protected Query newWildcardQuery(Term t) {
-    WildcardQuery query = new WildcardQuery(t);
-    query.setConstantScoreRewrite(constantScoreRewrite);
-    return query;
+    return new WildcardQuery(t); 
   }
 
   /**
@@ -1382,6 +1247,7 @@
   boolean prefix = false;
   boolean wildcard = false;
   boolean fuzzy = false;
+  boolean rangein = false;
   Query q;
 }
 {

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java Sat Jul  4 20:08:54 2009
@@ -2,8 +2,8 @@
 package org.apache.lucene.queryParser;
 import java.io.IOException;
 import java.io.StringReader;
-import java.text.Collator;
 import java.text.DateFormat;
+import java.text.Collator;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Date;
@@ -13,10 +13,7 @@
 import java.util.Map;
 import java.util.Vector;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.DateField;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.index.Term;

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/Token.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/Token.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/Token.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/Token.java Sat Jul  4 20:08:54 2009
@@ -121,4 +121,4 @@
   }
 
 }
-/* JavaCC - OriginalChecksum=37b1923f964a5a434f5ea3d6952ff200 (do not edit this line) */
+/* JavaCC - OriginalChecksum=c147cc166a7cf8812c7c39bc8c5eb868 (do not edit this line) */

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/TokenMgrError.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/TokenMgrError.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/TokenMgrError.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/queryParser/TokenMgrError.java Sat Jul  4 20:08:54 2009
@@ -137,4 +137,4 @@
       this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
    }
 }
-/* JavaCC - OriginalChecksum=55cddb2336a66b376c0bb59d916b326d (do not edit this line) */
+/* JavaCC - OriginalChecksum=186d5bcc64733844c7daab5ad5a6e349 (do not edit this line) */

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/ConstantScoreRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/ConstantScoreRangeQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/ConstantScoreRangeQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/ConstantScoreRangeQuery.java Sat Jul  4 20:08:54 2009
@@ -17,6 +17,9 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
 import java.text.Collator;
 
 /**
@@ -29,31 +32,121 @@
  * Either or both endpoints may be open.  Open endpoints may not be exclusive
  * (you can't select all but the first or last term without explicitly specifying the term to exclude.)
  *
- * @deprecated Please use {@link RangeQuery}, and call
- * {@link RangeQuery#setConstantScoreRewrite}, instead.
+ *
  * @version $Id$
  */
-public class ConstantScoreRangeQuery extends RangeQuery
+
+public class ConstantScoreRangeQuery extends Query
 {
+  private final String fieldName;
+  private final String lowerVal;
+  private final String upperVal;
+  private final boolean includeLower;
+  private final boolean includeUpper;
+  private Collator collator;
+
 
   public ConstantScoreRangeQuery(String fieldName, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper)
   {
-    super(fieldName, lowerVal, upperVal, includeLower, includeUpper);
-    this.constantScoreRewrite = true;
+    // do a little bit of normalization...
+    // open ended range queries should always be inclusive.
+    if (lowerVal==null) {
+      includeLower=true;
+    } else if (includeLower && lowerVal.equals("")) {
+      lowerVal=null;
+    }
+    if (upperVal==null) {
+      includeUpper=true;
+    }
+
+
+    this.fieldName = fieldName.intern();  // intern it, just like terms...
+    this.lowerVal = lowerVal;
+    this.upperVal = upperVal;
+    this.includeLower = includeLower;
+    this.includeUpper = includeUpper;
   }
 
   public ConstantScoreRangeQuery(String fieldName, String lowerVal,
                                  String upperVal, boolean includeLower,
-                                 boolean includeUpper, Collator collator) {
-    super(fieldName, lowerVal, upperVal, includeLower, includeUpper, collator);
-    this.constantScoreRewrite = true;
+                                 boolean includeUpper, Collator collator)
+  {
+    this(fieldName, lowerVal, upperVal, includeLower, includeUpper);
+    this.collator = collator;
   }
 
-  public String getLowerVal() {
-    return getLowerTermText();
+  /** Returns the field name for this query */
+  public String getField() { return fieldName; }
+  /** Returns the value of the lower endpoint of this range query, null if open ended */
+  public String getLowerVal() { return lowerVal; }
+  /** Returns the value of the upper endpoint of this range query, null if open ended */
+  public String getUpperVal() { return upperVal; }
+  /** Returns <code>true</code> if the lower endpoint is inclusive */
+  public boolean includesLower() { return includeLower; }
+  /** Returns <code>true</code> if the upper endpoint is inclusive */
+  public boolean includesUpper() { return includeUpper; }
+
+  public Query rewrite(IndexReader reader) throws IOException {
+    // Map to RangeFilter semantics which are slightly different...
+    RangeFilter rangeFilt = new RangeFilter
+        (fieldName, lowerVal != null?lowerVal:"", upperVal,
+         lowerVal==""?false:includeLower, upperVal==null?false:includeUpper,
+         collator);
+    Query q = new ConstantScoreQuery(rangeFilt);
+    q.setBoost(getBoost());
+    return q;
   }
 
-  public String getUpperVal() {
-    return getUpperTermText();
-  }
+    /** Prints a user-readable version of this query. */
+    public String toString(String field)
+    {
+        StringBuffer buffer = new StringBuffer();
+        if (!getField().equals(field))
+        {
+            buffer.append(getField());
+            buffer.append(":");
+        }
+        buffer.append(includeLower ? '[' : '{');
+        buffer.append(lowerVal != null ? lowerVal : "*");
+        buffer.append(" TO ");
+        buffer.append(upperVal != null ? upperVal : "*");
+        buffer.append(includeUpper ? ']' : '}');
+        if (getBoost() != 1.0f)
+        {
+            buffer.append("^");
+            buffer.append(Float.toString(getBoost()));
+        }
+        return buffer.toString();
+    }
+
+    /** Returns true if <code>o</code> is equal to this. */
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (!(o instanceof ConstantScoreRangeQuery)) return false;
+        ConstantScoreRangeQuery other = (ConstantScoreRangeQuery) o;
+
+        if (this.fieldName != other.fieldName  // interned comparison
+            || this.includeLower != other.includeLower
+            || this.includeUpper != other.includeUpper
+            || (this.collator != null && ! this.collator.equals(other.collator))
+           ) { return false; }
+        if (this.lowerVal != null ? !this.lowerVal.equals(other.lowerVal) : other.lowerVal != null) return false;
+        if (this.upperVal != null ? !this.upperVal.equals(other.upperVal) : other.upperVal != null) return false;
+        return this.getBoost() == other.getBoost();
+    }
+
+    /** Returns a hash code value for this object.*/
+    public int hashCode() {
+      int h = Float.floatToIntBits(getBoost()) ^ fieldName.hashCode();
+      // hashCode of "" is 0, so don't use that for null...
+      h ^= lowerVal != null ? lowerVal.hashCode() : 0x965a965a;
+      // don't just XOR upperVal with out mixing either it or h, as it will cancel
+      // out lowerVal if they are equal.
+      h ^= (h << 17) | (h >>> 16);  // a reversible (one to one) 32 bit mapping mix
+      h ^= (upperVal != null ? (upperVal.hashCode()) : 0x5a695a69);
+      h ^= (includeLower ? 0x665599aa : 0)
+         ^ (includeUpper ? 0x99aa5566 : 0);
+      h ^= collator != null ? collator.hashCode() : 0;
+      return h;
+    }
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FieldCache.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FieldCache.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FieldCache.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FieldCache.java Sat Jul  4 20:08:54 2009
@@ -38,29 +38,7 @@
 
   /** Expert: Stores term text values and document ordering data. */
   public static class StringIndex {
-	  
-    public int binarySearchLookup(String key) {
-      // this special case is the reason that Arrays.binarySearch() isn't useful.
-      if (key == null)
-        return 0;
-	  
-      int low = 1;
-      int high = lookup.length-1;
-
-      while (low <= high) {
-        int mid = (low + high) >> 1;
-        int cmp = lookup[mid].compareTo(key);
-
-        if (cmp < 0)
-          low = mid + 1;
-        else if (cmp > 0)
-          high = mid - 1;
-        else
-          return mid; // key found
-      }
-      return -(low + 1);  // key not found.
-    }
-	
+
     /** All the term values, in natural order. */
     public final String[] lookup;
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FuzzyQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FuzzyQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FuzzyQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/FuzzyQuery.java Sat Jul  4 20:08:54 2009
@@ -99,10 +99,6 @@
   protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
     return new FuzzyTermEnum(reader, getTerm(), minimumSimilarity, prefixLength);
   }
-
-  public void setConstantScoreRewrite(boolean constantScoreRewrite) {
-    throw new UnsupportedOperationException("FuzzyQuery cannot rewrite to a constant score query");
-  }
   
   public Query rewrite(IndexReader reader) throws IOException {
     FilteredTermEnum enumerator = getEnum(reader);

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/MultiTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/MultiTermQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/MultiTermQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/MultiTermQuery.java Sat Jul  4 20:08:54 2009
@@ -18,197 +18,81 @@
  */
 
 import java.io.IOException;
-import java.util.BitSet;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.ToStringUtils;
 
 /**
  * A {@link Query} that matches documents containing a subset of terms provided
  * by a {@link FilteredTermEnum} enumeration.
  * <P>
- * <code>MultiTermQuery</code> is not designed to be used by itself. <BR>
+ * <code>MultiTermQuery</code> is not designed to be used by itself.
+ * <BR>
 * The reason being that it is not initialized with a {@link FilteredTermEnum}
  * enumeration. A {@link FilteredTermEnum} enumeration needs to be provided.
  * <P>
  * For example, {@link WildcardQuery} and {@link FuzzyQuery} extend
  * <code>MultiTermQuery</code> to provide {@link WildcardTermEnum} and
  * {@link FuzzyTermEnum}, respectively.
- * 
- * The pattern Term may be null. A query that uses a null pattern Term should
- * override equals and hashcode.
  */
 public abstract class MultiTermQuery extends Query {
-  protected Term term;
-  protected boolean constantScoreRewrite = false;
+    private Term term;
 
-  /** Constructs a query for terms matching <code>term</code>. */
-  public MultiTermQuery(Term term) {
-    this.term = term;
-  }
-
-  /**
-   * Constructs a query matching terms that cannot be represented with a single
-   * Term.
-   */
-  public MultiTermQuery() {
-  }
-
-  /** Returns the pattern term. */
-  public Term getTerm() {
-    return term;
-  }
+    /** Constructs a query for terms matching <code>term</code>. */
+    public MultiTermQuery(Term term) {
+        this.term = term;
+    }
 
-  /** Construct the enumeration to be used, expanding the pattern term. */
-  protected abstract FilteredTermEnum getEnum(IndexReader reader)
-      throws IOException;
+    /** Returns the pattern term. */
+    public Term getTerm() { return term; }
 
-  protected Filter getFilter() {
-    return new MultiTermFilter(this);
-  }
+    /** Construct the enumeration to be used, expanding the pattern term. */
+    protected abstract FilteredTermEnum getEnum(IndexReader reader)
+      throws IOException;
 
-  public Query rewrite(IndexReader reader) throws IOException {
-    if (!constantScoreRewrite) {
+    public Query rewrite(IndexReader reader) throws IOException {
       FilteredTermEnum enumerator = getEnum(reader);
       BooleanQuery query = new BooleanQuery(true);
       try {
         do {
           Term t = enumerator.term();
           if (t != null) {
-            TermQuery tq = new TermQuery(t); // found a match
+            TermQuery tq = new TermQuery(t);      // found a match
             tq.setBoost(getBoost() * enumerator.difference()); // set the boost
-            query.add(tq, BooleanClause.Occur.SHOULD); // add to query
+            query.add(tq, BooleanClause.Occur.SHOULD);          // add to query
           }
         } while (enumerator.next());
       } finally {
         enumerator.close();
       }
       return query;
-    } else {
-      Query query = new ConstantScoreQuery(getFilter());
-      query.setBoost(getBoost());
-      return query;
     }
-  }
-
-  /** Prints a user-readable version of this query. */
-  public String toString(String field) {
-    StringBuffer buffer = new StringBuffer();
-    if (term != null) {
-      if (!term.field().equals(field)) {
-        buffer.append(term.field());
-        buffer.append(":");
-      }
-      buffer.append(term.text());
-    } else {
-      buffer.append("termPattern:unknown");
-    }
-    buffer.append(ToStringUtils.boost(getBoost()));
-    return buffer.toString();
-  }
-
-  public boolean getConstantScoreRewrite() {
-    return constantScoreRewrite;
-  }
-
-  public void setConstantScoreRewrite(boolean constantScoreRewrite) {
-    this.constantScoreRewrite = constantScoreRewrite;
-  }
-
-  public boolean equals(Object o) {
-    if (o == null || term == null) {
-      throw new UnsupportedOperationException(
-          "MultiTermQuerys that do not use a pattern term need to override equals/hashcode");
-    }
-
-    if (this == o)
-      return true;
-    if (!(o instanceof MultiTermQuery))
-      return false;
-
-    final MultiTermQuery multiTermQuery = (MultiTermQuery) o;
-
-    if (!term.equals(multiTermQuery.term))
-      return false;
-
-    return getBoost() == multiTermQuery.getBoost();
-  }
-
-  public int hashCode() {
-    if (term == null) {
-      throw new UnsupportedOperationException(
-          "MultiTermQuerys that do not use a pattern term need to override equals/hashcode");
-    }
-    return term.hashCode() + Float.floatToRawIntBits(getBoost());
-  }
-
-  static class MultiTermFilter extends Filter {
-    MultiTermQuery mtq;
 
-    abstract class TermGenerator {
-      public void generate(IndexReader reader) throws IOException {
-        TermEnum enumerator = mtq.getEnum(reader);
-        TermDocs termDocs = reader.termDocs();
-        try {
-          do {
-            Term term = enumerator.term();
-            if (term == null)
-              break;
-            termDocs.seek(term);
-            while (termDocs.next()) {
-              handleDoc(termDocs.doc());
-            }
-          } while (enumerator.next());
-        } finally {
-          termDocs.close();
-          enumerator.close();
+    /** Prints a user-readable version of this query. */
+    public String toString(String field) {
+        StringBuffer buffer = new StringBuffer();
+        if (!term.field().equals(field)) {
+            buffer.append(term.field());
+            buffer.append(":");
         }
-      }
-      abstract public void handleDoc(int doc);
-    }
-    
-    public MultiTermFilter(MultiTermQuery mtq) {
-      this.mtq = mtq;
-    }
-
-    public BitSet bits(IndexReader reader) throws IOException {
-      final BitSet bitSet = new BitSet(reader.maxDoc());
-      new TermGenerator() {
-        public void handleDoc(int doc) {
-          bitSet.set(doc);
-        }
-      }.generate(reader);
-      return bitSet;
+        buffer.append(term.text());
+        buffer.append(ToStringUtils.boost(getBoost()));
+        return buffer.toString();
     }
 
-    public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-      final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
-      new TermGenerator() {
-        public void handleDoc(int doc) {
-          bitSet.set(doc);
-        }
-      }.generate(reader);
-
-      return bitSet;
-    }
-      
     public boolean equals(Object o) {
+      if (this == o) return true;
+      if (!(o instanceof MultiTermQuery)) return false;
 
-      if (this == o)
-        return true;
-      if (!(o instanceof MultiTermFilter))
-        return false;
+      final MultiTermQuery multiTermQuery = (MultiTermQuery) o;
 
-      final MultiTermFilter filter = (MultiTermFilter) o;
-      return mtq.equals(filter.mtq);
+      if (!term.equals(multiTermQuery.term)) return false;
+
+      return getBoost() == multiTermQuery.getBoost();
     }
-      
+
     public int hashCode() {
-      return mtq.hashCode();
+      return term.hashCode() + Float.floatToRawIntBits(getBoost());
     }
-  }
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixFilter.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixFilter.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixFilter.java Sat Jul  4 20:08:54 2009
@@ -17,28 +17,25 @@
  * limitations under the License.
  */
 
-import java.io.IOException;
-import java.util.BitSet;
-
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.TermDocs;
+
+import java.util.BitSet;
+import java.io.IOException;
 
 /**
- * A Filter that restricts search results to values that have a matching prefix in a given
- * field.
- * 
- * <p>
- * This code borrows heavily from {@link PrefixQuery}, but is implemented as a Filter
- * 
- * </p>
+ *
+ * @version $Id$
  */
 public class PrefixFilter extends Filter {
   protected final Term prefix;
-  private PrefixQuery prefixQuery;
 
   public PrefixFilter(Term prefix) {
     this.prefix = prefix;
-    this.prefixQuery = new PrefixQuery(prefix);
   }
 
   public Term getPrefix() { return prefix; }
@@ -47,11 +44,23 @@
    * @deprecated Use {@link #getDocIdSet(IndexReader)} instead.
    */  
   public BitSet bits(IndexReader reader) throws IOException {
-    return prefixQuery.getFilter().bits(reader);
+    final BitSet bitSet = new BitSet(reader.maxDoc());
+    new PrefixGenerator(prefix) {
+      public void handleDoc(int doc) {
+        bitSet.set(doc);
+      }
+    }.generate(reader);
+    return bitSet;
   }
   
   public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-    return prefixQuery.getFilter().getDocIdSet(reader);
+    final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
+    new PrefixGenerator(prefix) {
+      public void handleDoc(int doc) {
+        bitSet.set(doc);
+      }
+    }.generate(reader);
+    return bitSet;
   }
 
   /** Prints a user-readable version of this query. */
@@ -62,7 +71,51 @@
     buffer.append(")");
     return buffer.toString();
   }
+}
+
+// keep this protected until I decide if it's a good way
+// to separate id generation from collection (or should
+// I just reuse hitcollector???)
+interface IdGenerator {
+  public void generate(IndexReader reader) throws IOException;
+  public void handleDoc(int doc);
+}
+
+
+abstract class PrefixGenerator implements IdGenerator {
+  protected final Term prefix;
+
+  PrefixGenerator(Term prefix) {
+    this.prefix = prefix;
+  }
 
+  public void generate(IndexReader reader) throws IOException {
+    TermEnum enumerator = reader.terms(prefix);
+    TermDocs termDocs = reader.termDocs();
+
+    try {
+
+      String prefixText = prefix.text();
+      String prefixField = prefix.field();
+      do {
+        Term term = enumerator.term();
+        if (term != null &&
+            term.text().startsWith(prefixText) &&
+            term.field() == prefixField) // interned comparison
+        {
+          termDocs.seek(term);
+          while (termDocs.next()) {
+            handleDoc(termDocs.doc());
+          }
+        } else {
+          break;
+        }
+      } while (enumerator.next());
+    } finally {
+      termDocs.close();
+      enumerator.close();
+    }
+  }
 }
 
 

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixQuery.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixQuery.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/PrefixQuery.java Sat Jul  4 20:08:54 2009
@@ -19,33 +19,48 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.util.ToStringUtils;
 
 /** A Query that matches documents containing terms with a specified prefix. A PrefixQuery
  * is built by QueryParser for input like <code>app*</code>. */
-public class PrefixQuery extends MultiTermQuery {
+public class PrefixQuery extends Query {
   private Term prefix;
 
   /** Constructs a query for terms starting with <code>prefix</code>. */
   public PrefixQuery(Term prefix) {
-    super(prefix);
     this.prefix = prefix;
   }
 
   /** Returns the prefix of this query. */
   public Term getPrefix() { return prefix; }
-  
-  protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
-    return new PrefixTermEnum(reader, getTerm());
-  }
 
-  public boolean equals(Object o) {
-    if (o instanceof PrefixQuery)
-      return super.equals(o);
-
-    return false;
+  public Query rewrite(IndexReader reader) throws IOException {
+    BooleanQuery query = new BooleanQuery(true);
+    TermEnum enumerator = reader.terms(prefix);
+    try {
+      String prefixText = prefix.text();
+      String prefixField = prefix.field();
+      do {
+        Term term = enumerator.term();
+        if (term != null &&
+            term.text().startsWith(prefixText) &&
+            term.field() == prefixField) // interned comparison 
+        {
+          TermQuery tq = new TermQuery(term);	  // found a match
+          tq.setBoost(getBoost());                // set the boost
+          query.add(tq, BooleanClause.Occur.SHOULD);		  // add to query
+          //System.out.println("added " + term);
+        } else {
+          break;
+        }
+      } while (enumerator.next());
+    } finally {
+      enumerator.close();
+    }
+    return query;
   }
 
   /** Prints a user-readable version of this query. */
@@ -60,4 +75,18 @@
     buffer.append(ToStringUtils.boost(getBoost()));
     return buffer.toString();
   }
+
+  /** Returns true iff <code>o</code> is equal to this. */
+  public boolean equals(Object o) {
+    if (!(o instanceof PrefixQuery))
+      return false;
+    PrefixQuery other = (PrefixQuery)o;
+    return (this.getBoost() == other.getBoost())
+      && this.prefix.equals(other.prefix);
+  }
+
+  /** Returns a hash code value for this object.*/
+  public int hashCode() {
+    return Float.floatToIntBits(getBoost()) ^ prefix.hashCode() ^ 0x6634D93C;
+  }
 }

Modified: lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/QueryTermVector.java
URL: http://svn.apache.org/viewvc/lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/QueryTermVector.java?rev=791173&r1=791172&r2=791173&view=diff
==============================================================================
--- lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/QueryTermVector.java (original)
+++ lucene/java/branches/lucene_2_4_back_compat_tests/src/java/org/apache/lucene/search/QueryTermVector.java Sat Jul  4 20:08:54 2009
@@ -29,7 +29,6 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.TermFreqVector;
 
 /**
@@ -59,17 +58,9 @@
       {
         List terms = new ArrayList();
         try {
-          if (stream.useNewAPI()) {
-            stream.reset();
-            TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
-            while (stream.incrementToken()) {
-              terms.add(termAtt.term());
-            }
-          } else {  
-            final Token reusableToken = new Token();
-            for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
-              terms.add(nextToken.term());
-            }
+          final Token reusableToken = new Token();
+          for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
+            terms.add(nextToken.term());
           }
           processTerms((String[])terms.toArray(new String[terms.size()]));
         } catch (IOException e) {



Mime
View raw message