lucene-java-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From mharw...@apache.org
Subject svn commit: r380874 [1/3] - in /lucene/java/trunk/contrib: queries/ queries/src/ queries/src/java/ queries/src/java/org/ queries/src/java/org/apache/ queries/src/java/org/apache/lucene/ queries/src/java/org/apache/lucene/search/ queries/src/java/org/ap...
Date Sat, 25 Feb 2006 00:39:24 GMT
Author: mharwood
Date: Fri Feb 24 16:39:18 2006
New Revision: 380874

URL: http://svn.apache.org/viewcvs?rev=380874&view=rev
Log:
Added XML-query-parser module for new extensible query parser that handles queries expressed as XML. Is dependent on new "queries" contrib module.

Added "queries" contrib module for various new query/filter classes. This area is also intended to consolidate existing query classes, so I have moved a copy of MoreLikeThis into here. Probably need to remove the "similarity" module as a result, if no one objects.

Added:
    lucene/java/trunk/contrib/queries/
    lucene/java/trunk/contrib/queries/build.xml
    lucene/java/trunk/contrib/queries/readme.txt
    lucene/java/trunk/contrib/queries/src/
    lucene/java/trunk/contrib/queries/src/java/
    lucene/java/trunk/contrib/queries/src/java/org/
    lucene/java/trunk/contrib/queries/src/java/org/apache/
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThisQuery.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java
    lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/package.html
    lucene/java/trunk/contrib/xml-query-parser/
    lucene/java/trunk/contrib/xml-query-parser/build.xml
    lucene/java/trunk/contrib/xml-query-parser/readme.htm
    lucene/java/trunk/contrib/xml-query-parser/src/
    lucene/java/trunk/contrib/xml-query-parser/src/java/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CoreParser.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CorePlusExtensionsParser.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/DOMUtils.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilderFactory.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilteredQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/ParserException.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryBuilderFactory.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BooleanQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/ConstantScoreQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/FuzzyLikeThisQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/RangeFilterBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanBuilderBase.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanFirstBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanNearBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanNotBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanQueryBuilderFactory.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanTermBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/UserInputQueryBuilder.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/BooleanQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/BoostingQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/ConstantScoreQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/FuzzyLikeThisQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/LikeThisQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/RangeFilterQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/SpanQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TermQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TermsFilterQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/UserInputQuery.xml
    lucene/java/trunk/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/reuters21578.txt

Added: lucene/java/trunk/contrib/queries/build.xml
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/build.xml?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/build.xml (added)
+++ lucene/java/trunk/contrib/queries/build.xml Fri Feb 24 16:39:18 2006
@@ -0,0 +1,10 @@
+<?xml version="1.0"?>
+
+<!-- Ant build for the "queries" contrib module. All real targets (compile,
+     jar, test, ...) are inherited from the shared contrib build file
+     imported below; this file only names the module. -->
+<project name="queries" default="default">
+
+  <description>
+    Queries - various query object exotica not in core
+  </description>
+
+  <import file="../contrib-build.xml"/>
+</project>

Added: lucene/java/trunk/contrib/queries/readme.txt
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/readme.txt?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/readme.txt (added)
+++ lucene/java/trunk/contrib/queries/readme.txt Fri Feb 24 16:39:18 2006
@@ -0,0 +1,27 @@
+This module contains a number of filter and query objects that add to core lucene.
+
+The "MoreLikeThis" class from the "similarity" module has been copied into here.
+If people are generally happy with this move then the similarity module can be deleted, or at least a 
+"Moved to queries module..." note left in its place.
+
+==== FuzzyLikeThis - mixes the behaviour of FuzzyQuery and MoreLikeThis but with special consideration
+of fuzzy scoring factors. This generally produces good results for queries where users may provide details in a number of 
+fields and have no knowledge of boolean query syntax and also want a degree of fuzzy matching. The query is fast because, like
+MoreLikeThis, it optimizes the query to only the most distinguishing terms.
+
+==== BoostingQuery - effectively demotes search results that match a given query. 
+Unlike the "NOT" clause, this still selects documents that contain undesirable terms, 
+but reduces the overall score of docs containing these terms.
+
+
+==== TermsFilter -  Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in 
+a sequence. An example might be a collection of primary keys from a database query result or perhaps 
+a choice of "category" labels picked by the end user.
+
+
+
+
+Mark Harwood
+25/02/2006
+
+

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,71 @@
+package org.apache.lucene.search;
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.Similarity;
+/**
+ * The BoostingQuery class can be used to effectively demote results that match a given query. 
+ * Unlike the "NOT" clause, this still selects documents that contain undesirable terms, 
+ * but reduces their overall score:
+ *
+ *     Query balancedQuery = new BoostingQuery(positiveQuery, negativeQuery, 0.01f);
+ *
+ * In this scenario the positiveQuery contains the mandatory, desirable criteria which is used to 
+ * select all matching documents, and the negativeQuery contains the undesirable elements which 
+ * are simply used to lessen the scores. Documents that match the negativeQuery have their score 
+ * multiplied by the supplied "boost" parameter, so this should be less than 1 to achieve a 
+ * demoting effect.
+ * 
+ * This code was originally made available here: http://marc.theaimsgroup.com/?l=lucene-user&m=108058407130459&w=2
+ * and is documented here: http://wiki.apache.org/jakarta-lucene/CommunityContributions
+ */
+public class BoostingQuery extends Query {
+    private float boost;                            // score multiplier applied when the context query also matches
+    private Query match;                            // mandatory query: selects the result set
+    private Query context;                          // optional query: matching it rescales the score
+
+    /**
+     * @param match   selects all matching documents and provides the base score
+     * @param context documents also matching this query have their score multiplied
+     *                by {@code boost}; cloned internally, so the caller's instance
+     *                is left unmodified
+     * @param boost   the multiplier (use a value below 1 to demote)
+     */
+    public BoostingQuery(Query match, Query context, float boost) {
+      this.match = match;
+      this.context = (Query)context.clone();        // clone before changing boost
+      this.boost = boost;
+
+      // BUGFIX: zero the boost on the stored CLONE so context-only matches
+      // contribute no score. Previously setBoost was called on the caller's
+      // original query, which mutated the caller's object and left the
+      // clone's boost untouched.
+      this.context.setBoost(0.0f);                  // ignore context-only matches
+    }
+
+    /**
+     * Rewrites to a BooleanQuery (match MUST, context SHOULD) whose coord
+     * factor implements the demotion: one clause matched = score unchanged,
+     * both clauses matched = score multiplied by {@code boost}.
+     */
+    public Query rewrite(IndexReader reader) throws IOException {
+      BooleanQuery result = new BooleanQuery() {
+
+        public Similarity getSimilarity(Searcher searcher) {
+          return new DefaultSimilarity() {
+
+            public float coord(int overlap, int max) {
+              switch (overlap) {
+
+              case 1:                               // matched only one clause
+                return 1.0f;                        // use the score as-is
+
+              case 2:                               // matched both clauses
+                return boost;                       // multiply by boost
+
+              default:                              // cannot happen with two clauses
+                return 0.0f;
+                
+              }
+            }
+          };
+        }
+      };
+
+      result.add(match, BooleanClause.Occur.MUST);
+      result.add(context, BooleanClause.Occur.SHOULD);
+
+      return result;
+    }
+
+    public String toString(String field) {
+      return match.toString(field) + "/" + context.toString(field);
+    }
+  }
\ No newline at end of file

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,302 @@
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.util.PriorityQueue;
+
+/**
+ * Fuzzifies ALL terms provided as strings and then picks the best n differentiating terms.
+ * In effect this mixes the behaviour of FuzzyQuery and MoreLikeThis but with special consideration
+ * of fuzzy scoring factors.
+ * This generally produces good results for queries where users may provide details in a number of 
+ * fields and have no knowledge of boolean query syntax and also want a degree of fuzzy matching and
+ * a fast query.
+ * 
+ * For each source term the fuzzy variants are held in a BooleanQuery with no coord factor (because
+ * we are not looking for matches on multiple variants in any one doc). Additionally, a specialized
+ * TermQuery is used for variants and does not use that variant term's IDF because this would favour rarer 
+ * terms eg misspellings. Instead, all variants use the same IDF ranking (the one for the source query 
+ * term) and this is factored into the variant's boost. If the source query term does not exist in the
+ * index the average IDF of the variants is used. 
+ *
+ * NOTE: rewrite() caches its result and clears the pending field/term list, so
+ * addTerms() calls made after the first rewrite have no effect.
+ * @author maharwood
+ */
+public class FuzzyLikeThisQuery extends Query
+{
+    static Similarity sim=new DefaultSimilarity();
+    Query rewrittenQuery=null;   // cached result of rewrite()
+    ArrayList fieldVals=new ArrayList();  // pending FieldVals, consumed (and cleared) by rewrite()
+    Analyzer analyzer;
+    
+    ScoreTermQueue q;            // global queue ranking the best terms across all fields
+    int MAX_VARIANTS_PER_TERM=50;
+    boolean ignoreTF=false;
+
+    
+    /**
+     * 
+     * @param maxNumTerms The total number of terms clauses that will appear once rewritten as a BooleanQuery
+     * @param analyzer used to tokenize the strings passed to addTerms
+     */
+    public FuzzyLikeThisQuery(int maxNumTerms, Analyzer analyzer)
+    {
+        q=new ScoreTermQueue(maxNumTerms);
+        this.analyzer=analyzer;
+    }
+
+    // Simple value holder for one addTerms() request.
+    class FieldVals
+    {
+    	String queryString;
+    	String fieldName;
+    	float minSimilarity;
+    	int prefixLength;
+		public FieldVals(String name, float similarity, int length, String queryString)
+		{
+			fieldName = name;
+			minSimilarity = similarity;
+			prefixLength = length;
+			this.queryString = queryString;
+		}
+    	
+    }
+    
+    /**
+     * Adds user input for "fuzzification" 
+     * @param queryString The string which will be parsed by the analyzer and for which fuzzy variants will be parsed
+     * @param fieldName the field in which to search for variants
+     * @param minSimilarity The minimum similarity of the term variants (see FuzzyTermEnum)
+     * @param prefixLength Length of required common prefix on variant terms (see FuzzyTermEnum)
+     */
+    public void addTerms(String queryString, String fieldName,float minSimilarity, int prefixLength) 
+    {
+    	fieldVals.add(new FieldVals(fieldName,minSimilarity,prefixLength,queryString));
+    }
+    
+    
+    // Tokenizes one FieldVals request, finds fuzzy variants for every token and
+    // feeds the best-scoring ones (edit-distance score squared, times the source
+    // term's IDF) into the global queue q.
+    private void addTerms(IndexReader reader,FieldVals f) throws IOException
+    {
+        if(f.queryString==null) return;
+        TokenStream ts=analyzer.tokenStream(f.fieldName,new StringReader(f.queryString));
+        Token token=ts.next();
+        int corpusNumDocs=reader.numDocs();
+        Term internSavingTemplateTerm =new Term(f.fieldName,""); //optimization to avoid constructing new Term() objects
+        
+        while(token!=null)
+        {            
+            ScoreTermQueue variantsQ=new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term
+            float minScore=0;
+            Term startTerm=internSavingTemplateTerm.createTerm(token.termText());
+            FuzzyTermEnum fe=new FuzzyTermEnum(reader,startTerm,f.minSimilarity,f.prefixLength);
+            TermEnum origEnum = reader.terms(startTerm);
+            int df=0;
+            int numVariants=0;
+            int totalVariantDocFreqs=0;
+            try
+            {
+                if(startTerm.equals(origEnum.term()))
+                {
+                    df=origEnum.docFreq(); //store the df so all variants use same idf
+                }
+                do
+                {
+                    Term possibleMatch=fe.term();
+                    if(possibleMatch!=null)
+                    {
+                        numVariants++;
+                        totalVariantDocFreqs+=fe.docFreq();
+                        float score=fe.difference();
+                        if(variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore){
+                            ScoreTerm st=new ScoreTerm(possibleMatch,score,startTerm);                    
+                            variantsQ.insert(st);
+                            minScore = ((ScoreTerm)variantsQ.top()).score; // maintain minScore
+                        }
+                    }
+                }
+                while(fe.next());
+            }
+            finally
+            {
+                // BUGFIX: the term enumerations were never closed (resource leak)
+                fe.close();
+                origEnum.close();
+            }
+            if(numVariants>0)
+            {
+                // BUGFIX: a token with no variants previously executed "break",
+                // silently abandoning ALL remaining tokens; now it just falls
+                // through to the next token.
+                int avgDf=totalVariantDocFreqs/numVariants;
+                if(df==0)//no direct match we can use as df for all variants 
+                {
+                    df=avgDf; //use avg df of all variants
+                }
+                
+                // take the top variants (scored by edit distance) and reset the score
+                // to include an IDF factor then add to the global queue for ranking overall top query terms
+                int size = variantsQ.size();
+                for(int i = 0; i < size; i++)
+                {
+                  ScoreTerm st = (ScoreTerm) variantsQ.pop();
+                  st.score=(st.score*st.score)*sim.idf(df,corpusNumDocs);
+                  q.insert(st);
+                }
+            }
+            token=ts.next();
+        }        
+    }
+            
+    /**
+     * Rewrites to a BooleanQuery of the overall best-ranked fuzzy variants.
+     * Variants of the same source term share one coord-disabled BooleanQuery
+     * so multiple variants in one doc are not rewarded. The result is cached.
+     */
+    public Query rewrite(IndexReader reader) throws IOException
+    {
+        if(rewrittenQuery!=null)
+        {
+            return rewrittenQuery;
+        }
+        //load up the list of possible terms
+        for (Iterator iter = fieldVals.iterator(); iter.hasNext();)
+		{
+			FieldVals f = (FieldVals) iter.next();
+			addTerms(reader,f);			
+		}
+        //clear the list of fields
+        fieldVals.clear();
+        
+        BooleanQuery bq=new BooleanQuery();
+        
+        
+        //create BooleanQueries to hold the variants for each token/field pair and ensure it
+        // has no coord factor
+        //Step 1: sort the termqueries by term/field
+        HashMap variantQueries=new HashMap();
+        int size = q.size();
+        for(int i = 0; i < size; i++)
+        {
+          ScoreTerm st = (ScoreTerm) q.pop();
+          ArrayList l=(ArrayList) variantQueries.get(st.fuzziedSourceTerm);
+          if(l==null)
+          {
+              l=new ArrayList();
+              variantQueries.put(st.fuzziedSourceTerm,l);
+          }
+          l.add(st);
+        }
+        //Step 2: Organize the sorted termqueries into zero-coord scoring boolean queries
+        for (Iterator iter = variantQueries.values().iterator(); iter.hasNext();)
+        {
+            ArrayList variants = (ArrayList) iter.next();
+            if(variants.size()==1)
+            {
+                //optimize where only one selected variant
+                ScoreTerm st=(ScoreTerm) variants.get(0);
+                TermQuery tq = new FuzzyTermQuery(st.term,ignoreTF);
+                tq.setBoost(st.score); // set the boost to a mix of IDF and score
+                bq.add(tq, BooleanClause.Occur.SHOULD); 
+            }
+            else
+            {
+                BooleanQuery termVariants=new BooleanQuery(true); //disable coord and IDF for these term variants
+                for (Iterator iterator2 = variants.iterator(); iterator2
+                        .hasNext();)
+                {
+                    ScoreTerm st = (ScoreTerm) iterator2.next();
+                    TermQuery tq = new FuzzyTermQuery(st.term,ignoreTF);      // found a match
+                    tq.setBoost(st.score); // set the boost using the ScoreTerm's score
+                    termVariants.add(tq, BooleanClause.Occur.SHOULD);          // add to query                    
+                }
+                bq.add(termVariants, BooleanClause.Occur.SHOULD);          // add to query
+            }
+        }
+        //TODO possible alternative step 3 - organize above booleans into a new layer of field-based
+        // booleans with a minimum-should-match of NumFields-1?
+        
+        bq.setBoost(getBoost()); // BUGFIX: propagate this query's boost to the rewritten form
+        this.rewrittenQuery=bq;
+        return bq;
+    }
+    
+    //Holds info for a fuzzy term variant - initially score is set to edit distance (for ranking best
+    // term variants) then is reset with IDF for use in ranking against all other
+    // terms/fields
+    private static class ScoreTerm{
+        public Term term;
+        public float score;
+        Term fuzziedSourceTerm;   // the source term this variant was derived from
+        
+        public ScoreTerm(Term term, float score, Term fuzziedSourceTerm){
+          this.term = term;
+          this.score = score;
+          this.fuzziedSourceTerm=fuzziedSourceTerm;
+        }
+      }
+      
+      // Min-queue over ScoreTerms: lowest score (ties broken by term order) sits on top,
+      // so insert() evicts the weakest variant when full.
+      private static class ScoreTermQueue extends PriorityQueue {        
+        public ScoreTermQueue(int size){
+          initialize(size);
+        }
+        
+        /* (non-Javadoc)
+         * @see org.apache.lucene.util.PriorityQueue#lessThan(java.lang.Object, java.lang.Object)
+         */
+        protected boolean lessThan(Object a, Object b) {
+          ScoreTerm termA = (ScoreTerm)a;
+          ScoreTerm termB = (ScoreTerm)b;
+          if (termA.score== termB.score)
+            return termA.term.compareTo(termB.term) > 0;
+          else
+            return termA.score < termB.score;
+        }
+        
+      }
+      
+      //overrides basic TermQuery to negate effects of IDF (idf is factored into boost of containing BooleanQuery)
+      private static class FuzzyTermQuery extends TermQuery
+      {
+    	  boolean ignoreTF;
+          public FuzzyTermQuery(Term t, boolean ignoreTF)
+          {
+        	  super(t);
+        	  this.ignoreTF=ignoreTF;
+          }
+          public Similarity getSimilarity(Searcher searcher)
+          {            
+              Similarity result = super.getSimilarity(searcher);
+              result = new SimilarityDelegator(result) {
+                  
+                  public float tf(float freq)
+                  {
+                	  if(ignoreTF)
+                	  {
+                          return 1; //ignore tf
+                	  }
+            		  return super.tf(freq);
+                  }
+                  public float idf(int docFreq, int numDocs)
+                  {
+                      //IDF is already factored into individual term boosts
+                      return 1;
+                  }               
+              };
+              return result;
+          }        
+      }
+      
+      
+
+    /* (non-Javadoc)
+     * @see org.apache.lucene.search.Query#toString(java.lang.String)
+     */
+    public String toString(String field)
+    {
+        // NOTE(review): returning null violates the usual toString contract;
+        // callers concatenating it will render "null". Left unchanged here to
+        // avoid altering observable behaviour - consider returning a summary.
+        return null;
+    }
+
+
+	public boolean isIgnoreTF()
+	{
+		return ignoreTF;
+	}
+
+
+	public void setIgnoreTF(boolean ignoreTF)
+	{
+		this.ignoreTF = ignoreTF;
+	}   
+    
+}

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,51 @@
+package org.apache.lucene.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.Iterator;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+
+/**
+ * Constructs a filter for docs matching any of the terms added to this class. 
+ * Unlike a RangeFilter this can be used for filtering on multiple terms that are not necessarily in 
+ * a sequence. An example might be a collection of primary keys from a database query result or perhaps 
+ * a choice of "category" labels picked by the end user. As a filter, this is much faster than the 
+ * equivalent query (a BooleanQuery with many "should" TermQueries)
+ * 
+ * @author maharwood
+ */
+public class TermsFilter extends Filter
+{
+	ArrayList termsList=new ArrayList();
+	
+	/**
+	 * Adds a term to the list of acceptable terms   
+	 * @param term
+	 */
+	public void addTerm(Term term)
+	{
+		termsList.add(term);
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.lucene.search.Filter#bits(org.apache.lucene.index.IndexReader)
+	 */
+	public BitSet bits(IndexReader reader) throws IOException
+	{
+		BitSet result=new BitSet(reader.maxDoc());
+		for (Iterator iter = termsList.iterator(); iter.hasNext();)
+		{
+			Term term = (Term) iter.next();
+			TermDocs td=reader.termDocs(term);
+	        while (td.next())
+	        {
+	            result.set(td.doc());
+	        }						
+		}
+		return result;
+	}
+}

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,926 @@
+/**
+ * Copyright 2004-2005 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.similar;
+
+import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermFreqVector;
+import org.apache.lucene.search.BooleanClause;	
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Hits;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+
+import java.util.Set;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Collection;
+import java.util.Iterator;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.File;
+import java.io.PrintStream;
+import java.io.StringReader;
+import java.io.FileReader;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.util.ArrayList;
+
+
+/**
+ * Generate "more like this" similarity queries. 
+ * Based on this mail:
+ * <code><pre>
+ * Lucene does let you access the document frequency of terms, with IndexReader.docFreq().
+ * Term frequencies can be computed by re-tokenizing the text, which, for a single document,
+ * is usually fast enough.  But looking up the docFreq() of every term in the document is
+ * probably too slow.
+ * 
+ * You can use some heuristics to prune the set of terms, to avoid calling docFreq() too much,
+ * or at all.  Since you're trying to maximize a tf*idf score, you're probably most interested
+ * in terms with a high tf. Choosing a tf threshold even as low as two or three will radically
+ * reduce the number of terms under consideration.  Another heuristic is that terms with a
+ * high idf (i.e., a low df) tend to be longer.  So you could threshold the terms by the
+ * number of characters, not selecting anything less than, e.g., six or seven characters.
+ * With these sorts of heuristics you can usually find small set of, e.g., ten or fewer terms
+ * that do a pretty good job of characterizing a document.
+ * 
+ * It all depends on what you're trying to do.  If you're trying to eek out that last percent
+ * of precision and recall regardless of computational difficulty so that you can win a TREC
+ * competition, then the techniques I mention above are useless.  But if you're trying to
+ * provide a "more like this" button on a search results page that does a decent job and has
+ * good performance, such techniques might be useful.
+ * 
+ * An efficient, effective "more-like-this" query generator would be a great contribution, if
+ * anyone's interested.  I'd imagine that it would take a Reader or a String (the document's
+ * text), analyzer Analyzer, and return a set of representative terms using heuristics like those
+ * above.  The frequency and length thresholds could be parameters, etc.
+ * 
+ * Doug
+ * </pre></code>
+ *
+ *
+ * <p>
+ * <h3>Initial Usage</h3>
+ *
+ * This class has lots of options to try to make it efficient and flexible.
+ * See the body of {@link #main main()} below in the source for real code, or
+ * if you want pseudo code, the simpliest possible usage is as follows. The bold
+ * fragment is specific to this class.
+ *
+ * <code><pre>
+ *
+ * IndexReader ir = ...
+ * IndexSearcher is = ...
+ * <b>
+ * MoreLikeThis mlt = new MoreLikeThis(ir);
+ * Reader target = ... </b><em>// orig source of doc you want to find similarities to</em><b>
+ * Query query = mlt.like( target);
+ * </b>
+ * Hits hits = is.search(query);
+ * <em>// now the usual iteration thru 'hits' - the only thing to watch for is to make sure
+ * you ignore the doc if it matches your 'target' document, as it should be similar to itself </em>
+ *
+ * </pre></code>
+ *
+ * Thus you:
+ * <ol>
+ * <li> do your normal, Lucene setup for searching,
+ * <li> create a MoreLikeThis,
+ * <li> get the text of the doc you want to find similaries to
+ * <li> then call one of the like() calls to generate a similarity query
+ * <li> call the searcher to find the similar docs
+ * </ol>
+ *
+ * <h3>More Advanced Usage</h3>
+ *
+ * You may want to use {@link #setFieldNames setFieldNames(...)} so you can examine
+ * multiple fields (e.g. body and title) for similarity.
+ * <p>
+ *
+ * Depending on the size of your index and the size and makeup of your documents you
+ * may want to call the other set methods to control how the similarity queries are
+ * generated:
+ * <ul>
+ * <li> {@link #setMinTermFreq setMinTermFreq(...)}
+ * <li> {@link #setMinDocFreq setMinDocFreq(...)}
+ * <li> {@link #setMinWordLen setMinWordLen(...)}
+ * <li> {@link #setMaxWordLen setMaxWordLen(...)}
+ * <li> {@link #setMaxQueryTerms setMaxQueryTerms(...)}
+ * <li> {@link #setMaxNumTokensParsed setMaxNumTokensParsed(...)}
+ * <li> {@link #setStopWords setStopWord(...)} 
+ * </ul> 
+ *
+ * <hr>
+ * <pre>
+ * Changes: Mark Harwood 29/02/04
+ * Some bugfixing, some refactoring, some optimisation.
+ *  - bugfix: retrieveTerms(int docNum) was not working for indexes without a termvector -added missing code
+ *  - bugfix: No significant terms being created for fields with a termvector - because 
+ *            was only counting one occurence per term/field pair in calculations(ie not including frequency info from TermVector) 
+ *  - refactor: moved common code into isNoiseWord()
+ *  - optimise: when no termvector support available - used maxNumTermsParsed to limit amount of tokenization
+ * </pre>
+ * 
+ * @author David Spencer
+ * @author Bruce Ritchie
+ * @author Mark Harwood
+ */
+public final class MoreLikeThis {
+
+	/**
+	 * Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support.
+	 * @see #getMaxNumTokensParsed
+	 */
+    public static final int DEFAULT_MAX_NUM_TOKENS_PARSED=5000;
+       
+
+	/**
+     * Default analyzer to parse source doc with.
+	 * @see #getAnalyzer
+     */
+    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer();
+
+    /**
+     * Ignore terms with less than this frequency in the source doc.
+	 * @see #getMinTermFreq
+	 * @see #setMinTermFreq	 
+     */
+    public static final int DEFAULT_MIN_TERM_FREQ = 2;
+
+    /**
+     * Ignore words which do not occur in at least this many docs.
+	 * @see #getMinDocFreq
+	 * @see #setMinDocFreq	 
+     */
+    public static final int DEFALT_MIN_DOC_FREQ = 5;
+
+    /**
+     * Boost terms in query based on score.
+	 * @see #isBoost
+	 * @see #setBoost 
+     */
+    public static final boolean DEFAULT_BOOST = false;
+
+    /**
+     * Default field names. Null is used to specify that the field names should be looked
+     * up at runtime from the provided reader.
+     */
+    public static final String[] DEFAULT_FIELD_NAMES = new String[] { "contents"};
+
+    /**
+     * Ignore words less than this length or if 0 then this has no effect.
+	 * @see #getMinWordLen
+	 * @see #setMinWordLen	 
+     */
+    public static final int DEFAULT_MIN_WORD_LENGTH = 0;
+
+    /**
+     * Ignore words greater than this length or if 0 then this has no effect.
+	 * @see #getMaxWordLen
+	 * @see #setMaxWordLen	 
+     */
+    public static final int DEFAULT_MAX_WORD_LENGTH = 0;
+
+	/**
+	 * Default set of stopwords.
+	 * If null means to allow stop words.
+	 *
+	 * @see #setStopWords
+	 * @see #getStopWords
+	 */
+	public static final Set DEFAULT_STOP_WORDS = null;
+
+	/**
+	 * Current set of stop words.
+	 */
+	private Set stopWords = DEFAULT_STOP_WORDS;
+
+    /**
+     * Return a Query with no more than this many terms.
+     *
+     * @see BooleanQuery#getMaxClauseCount
+	 * @see #getMaxQueryTerms
+	 * @see #setMaxQueryTerms	 
+     */
+    public static final int DEFAULT_MAX_QUERY_TERMS = 25;
+
+    /**
+     * Analyzer that will be used to parse the doc.
+     */
+    private Analyzer analyzer = DEFAULT_ANALYZER;
+
+    /**
+     * Ignore words less freqent that this.
+     */
+    private int minTermFreq = DEFAULT_MIN_TERM_FREQ;
+
+    /**
+     * Ignore words which do not occur in at least this many docs.
+     */
+    private int minDocFreq = DEFALT_MIN_DOC_FREQ;
+
+    /**
+     * Should we apply a boost to the Query based on the scores?
+     */
+    private boolean boost = DEFAULT_BOOST;
+
+    /**
+     * Field name we'll analyze.
+     */
+    private String[] fieldNames = DEFAULT_FIELD_NAMES;
+
+	/**
+	 * The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+	 */
+	private int maxNumTokensParsed=DEFAULT_MAX_NUM_TOKENS_PARSED;   
+    
+
+
+    /**
+     * Ignore words if less than this len.
+     */
+    private int minWordLen = DEFAULT_MIN_WORD_LENGTH;
+
+    /**
+     * Ignore words if greater than this len.
+     */
+    private int maxWordLen = DEFAULT_MAX_WORD_LENGTH;
+
+    /**
+     * Don't return a query longer than this.
+     */
+    private int maxQueryTerms = DEFAULT_MAX_QUERY_TERMS;
+
+    /**
+     * For idf() calculations.
+     */
+    private Similarity similarity = new DefaultSimilarity();
+
+    /**
+     * IndexReader to use
+     */
+    private final IndexReader ir;
+
+    /**
+     * Constructor requiring an IndexReader.
+     */
+    public MoreLikeThis(IndexReader ir) {
+        this.ir = ir;
+    }
+
+    /**
+     * Returns an analyzer that will be used to parse source doc with. The default analyzer
+     * is the {@link #DEFAULT_ANALYZER}.
+     *
+     * @return the analyzer that will be used to parse source doc with.
+	 * @see #DEFAULT_ANALYZER
+     */
+    public Analyzer getAnalyzer() {
+        return analyzer;
+    }
+
+    /**
+     * Sets the analyzer to use. An analyzer is not required for generating a query with the
+     * {@link #like(int)} method, all other 'like' methods require an analyzer.
+     *
+     * @param analyzer the analyzer to use to tokenize text.
+     */
+    public void setAnalyzer(Analyzer analyzer) {
+        this.analyzer = analyzer;
+    }
+
+    /**
+     * Returns the frequency below which terms will be ignored in the source doc. The default
+     * frequency is the {@link #DEFAULT_MIN_TERM_FREQ}.
+     *
+     * @return the frequency below which terms will be ignored in the source doc.
+     */
+    public int getMinTermFreq() {
+        return minTermFreq;
+    }
+
+    /**
+     * Sets the frequency below which terms will be ignored in the source doc.
+     *
+     * @param minTermFreq the frequency below which terms will be ignored in the source doc.
+     */
+    public void setMinTermFreq(int minTermFreq) {
+        this.minTermFreq = minTermFreq;
+    }
+
+    /**
+     * Returns the frequency at which words will be ignored which do not occur in at least this
+     * many docs. The default frequency is {@link #DEFALT_MIN_DOC_FREQ}.
+     *
+     * @return the frequency at which words will be ignored which do not occur in at least this
+     * many docs.
+     */
+    public int getMinDocFreq() {
+        return minDocFreq;
+    }
+
+    /**
+     * Sets the frequency at which words will be ignored which do not occur in at least this
+     * many docs.
+     *
+     * @param minDocFreq the frequency at which words will be ignored which do not occur in at
+     * least this many docs.
+     */
+    public void setMinDocFreq(int minDocFreq) {
+        this.minDocFreq = minDocFreq;
+    }
+
+    /**
+     * Returns whether to boost terms in query based on "score" or not. The default is
+     * {@link #DEFAULT_BOOST}.
+     *
+     * @return whether to boost terms in query based on "score" or not.
+	 * @see #setBoost
+     */
+    public boolean isBoost() {
+        return boost;
+    }
+
+    /**
+     * Sets whether to boost terms in query based on "score" or not.
+     *
+     * @param boost true to boost terms in query based on "score", false otherwise.
+	 * @see #isBoost
+     */
+    public void setBoost(boolean boost) {
+        this.boost = boost;
+    }
+
+    /**
+     * Returns the field names that will be used when generating the 'More Like This' query.
+     * The default field names that will be used is {@link #DEFAULT_FIELD_NAMES}.
+     *
+     * @return the field names that will be used when generating the 'More Like This' query.
+     */
+    public String[] getFieldNames() {
+        return fieldNames;
+    }
+
+    /**
+     * Sets the field names that will be used when generating the 'More Like This' query.
+     * Set this to null for the field names to be determined at runtime from the IndexReader
+     * provided in the constructor.
+     *
+     * @param fieldNames the field names that will be used when generating the 'More Like This'
+     * query.
+     */
+    public void setFieldNames(String[] fieldNames) {
+        this.fieldNames = fieldNames;
+    }
+
+    /**
+     * Returns the minimum word length below which words will be ignored. Set this to 0 for no
+     * minimum word length. The default is {@link #DEFAULT_MIN_WORD_LENGTH}.
+     *
+     * @return the minimum word length below which words will be ignored.
+     */
+    public int getMinWordLen() {
+        return minWordLen;
+    }
+
+    /**
+     * Sets the minimum word length below which words will be ignored.
+     *
+     * @param minWordLen the minimum word length below which words will be ignored.
+     */
+    public void setMinWordLen(int minWordLen) {
+        this.minWordLen = minWordLen;
+    }
+
+    /**
+     * Returns the maximum word length above which words will be ignored. Set this to 0 for no
+     * maximum word length. The default is {@link #DEFAULT_MAX_WORD_LENGTH}.
+     *
+     * @return the maximum word length above which words will be ignored.
+     */
+    public int getMaxWordLen() {
+        return maxWordLen;
+    }
+
+    /**
+     * Sets the maximum word length above which words will be ignored.
+     *
+     * @param maxWordLen the maximum word length above which words will be ignored.
+     */
+    public void setMaxWordLen(int maxWordLen) {
+        this.maxWordLen = maxWordLen;
+    }
+
+	/**
+	 * Set the set of stopwords.
+	 * Any word in this set is considered "uninteresting" and ignored.
+	 * Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
+	 * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
+	 * 
+	 * @param stopWords set of stopwords, if null it means to allow stop words
+	 *
+	 * @see org.apache.lucene.analysis.StopFilter#makeStopSet StopFilter.makeStopSet()
+	 * @see #getStopWords	 
+	 */
+	public void setStopWords(Set stopWords) {
+		this.stopWords = stopWords;
+	}
+
+	/**
+	 * Get the current stop words being used.
+	 * @see #setStopWords
+	 */
+	public Set getStopWords() {
+		return stopWords;
+	}
+		
+
+    /**
+     * Returns the maximum number of query terms that will be included in any generated query.
+     * The default is {@link #DEFAULT_MAX_QUERY_TERMS}.
+     *
+     * @return the maximum number of query terms that will be included in any generated query.
+     */
+    public int getMaxQueryTerms() {
+        return maxQueryTerms;
+    }
+
+    /**
+     * Sets the maximum number of query terms that will be included in any generated query.
+     *
+     * @param maxQueryTerms the maximum number of query terms that will be included in any
+     * generated query.
+     */
+    public void setMaxQueryTerms(int maxQueryTerms) {
+        this.maxQueryTerms = maxQueryTerms;
+    }
+
+	/**
+	 * @return The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+	 * @see #DEFAULT_MAX_NUM_TOKENS_PARSED
+	 */
+	public int getMaxNumTokensParsed()
+	{
+		return maxNumTokensParsed;
+	}
+
+	/**
+	 * @param i The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+	 */
+	public void setMaxNumTokensParsed(int i)
+	{
+		maxNumTokensParsed = i;
+	}
+
+
+
+
+    /**
+     * Return a query that will return docs like the passed lucene document ID.
+     *
+     * @param docNum the documentID of the lucene doc to generate the 'More Like This" query for.
+     * @return a query that will return docs like the passed lucene document ID.
+     */
+    public Query like(int docNum) throws IOException {
+        if (fieldNames == null) {
+            // gather list of valid fields from lucene
+            Collection fields = ir.getFieldNames( IndexReader.FieldOption.INDEXED);
+            fieldNames = (String[]) fields.toArray(new String[fields.size()]);
+        }
+
+        return createQuery(retrieveTerms(docNum));
+    }
+
+    /**
+     * Return a query that will return docs like the passed file.
+     *
+     * @return a query that will return docs like the passed file.
+     */
+    public Query like(File f) throws IOException {
+        if (fieldNames == null) {
+            // gather list of valid fields from lucene
+            Collection fields = ir.getFieldNames( IndexReader.FieldOption.INDEXED);
+            fieldNames = (String[]) fields.toArray(new String[fields.size()]);
+        }
+
+        return like(new FileReader(f));
+    }
+
+    /**
+     * Return a query that will return docs like the passed URL.
+     *
+     * @return a query that will return docs like the passed URL.
+     */
+    public Query like(URL u) throws IOException {
+        return like(new InputStreamReader(u.openConnection().getInputStream()));
+    }
+
+    /**
+     * Return a query that will return docs like the passed stream.
+     *
+     * @return a query that will return docs like the passed stream.
+     */
+    public Query like(java.io.InputStream is) throws IOException {
+        return like(new InputStreamReader(is));
+    }
+
+    /**
+     * Return a query that will return docs like the passed Reader.
+     *
+     * @return a query that will return docs like the passed Reader.
+     */
+    public Query like(Reader r) throws IOException {
+        return createQuery(retrieveTerms(r));
+    }
+
+    /**
+     * Create the More like query from a PriorityQueue
+     */
+    private Query createQuery(PriorityQueue q) {
+        BooleanQuery query = new BooleanQuery();
+        Object cur;
+        int qterms = 0;
+        float bestScore = 0;
+
+        while (((cur = q.pop()) != null)) {
+            Object[] ar = (Object[]) cur;
+            TermQuery tq = new TermQuery(new Term((String) ar[1], (String) ar[0]));
+
+            if (boost) {
+                if (qterms == 0) {
+                    bestScore = ((Float) ar[2]).floatValue();
+                }
+                float myScore = ((Float) ar[2]).floatValue();
+
+                tq.setBoost(myScore / bestScore);
+            }
+
+            try {
+                query.add(tq, BooleanClause.Occur.SHOULD);
+            }
+            catch (BooleanQuery.TooManyClauses ignore) {
+                break;
+            }
+
+            qterms++;
+            if (maxQueryTerms > 0 && qterms >= maxQueryTerms) {
+                break;
+            }
+        }
+
+        return query;
+    }
+
+    /**
+     * Create a PriorityQueue from a word->tf map.
+     *
+     * @param words a map of words keyed on the word(String) with Int objects as the values.
+     */
+    private PriorityQueue createQueue(Map words) throws IOException {
+        // have collected all words in doc and their freqs
+        int numDocs = ir.numDocs();
+        FreqQ res = new FreqQ(words.size()); // will order words by score
+
+        Iterator it = words.keySet().iterator();
+        while (it.hasNext()) { // for every word
+            String word = (String) it.next();
+
+            int tf = ((Int) words.get(word)).x; // term freq in the source doc
+            if (minTermFreq > 0 && tf < minTermFreq) {
+                continue; // filter out words that don't occur enough times in the source
+            }
+
+            // go through all the fields and find the largest document frequency
+            String topField = fieldNames[0];
+            int docFreq = 0;
+            for (int i = 0; i < fieldNames.length; i++) {
+                int freq = ir.docFreq(new Term(fieldNames[i], word));
+                topField = (freq > docFreq) ? fieldNames[i] : topField;
+                docFreq = (freq > docFreq) ? freq : docFreq;
+            }
+
+            if (minDocFreq > 0 && docFreq < minDocFreq) {
+                continue; // filter out words that don't occur in enough docs
+            }
+
+            if (docFreq == 0) {
+                continue; // index update problem?
+            }
+
+            float idf = similarity.idf(docFreq, numDocs);
+            float score = tf * idf;
+
+            // only really need 1st 3 entries, other ones are for troubleshooting
+            res.insert(new Object[]{word,                   // the word
+                                    topField,               // the top field
+                                    new Float(score),       // overall score
+                                    new Float(idf),         // idf
+                                    new Integer(docFreq),   // freq in all docs
+                                    new Integer(tf)
+            });
+        }
+        return res;
+    }
+
+    /**
+     * Describe the parameters that control how the "more like this" query is formed.
+     */
+    public String describeParams() {
+        StringBuffer sb = new StringBuffer();
+        sb.append("\t" + "maxQueryTerms  : " + maxQueryTerms + "\n");
+        sb.append("\t" + "minWordLen     : " + minWordLen + "\n");
+        sb.append("\t" + "maxWordLen     : " + maxWordLen + "\n");
+        sb.append("\t" + "fieldNames     : \"");
+        String delim = "";
+        for (int i = 0; i < fieldNames.length; i++) {
+            String fieldName = fieldNames[i];
+            sb.append(delim).append(fieldName);
+            delim = ", ";
+        }
+        sb.append("\n");
+        sb.append("\t" + "boost          : " + boost + "\n");
+        sb.append("\t" + "minTermFreq    : " + minTermFreq + "\n");
+        sb.append("\t" + "minDocFreq     : " + minDocFreq + "\n");
+        return sb.toString();
+    }
+
+    /**
+     * Test driver.
+     * Pass in "-i INDEX" and then either "-fn FILE" or "-url URL".
+     */
+    public static void main(String[] a) throws Throwable {
+        String indexName = "localhost_index";
+        String fn = "c:/Program Files/Apache Group/Apache/htdocs/manual/vhosts/index.html.en";
+        URL url = null;
+        for (int i = 0; i < a.length; i++) {
+            if (a[i].equals("-i")) {
+                indexName = a[++i];
+            }
+            else if (a[i].equals("-f")) {
+                fn = a[++i];
+            }
+            else if (a[i].equals("-url")) {
+                url = new URL(a[++i]);
+            }
+        }
+
+        PrintStream o = System.out;
+        IndexReader r = IndexReader.open(indexName);
+        o.println("Open index " + indexName + " which has " + r.numDocs() + " docs");
+
+        MoreLikeThis mlt = new MoreLikeThis(r);
+
+        o.println("Query generation parameters:");
+        o.println(mlt.describeParams());
+        o.println();
+
+        Query query = null;
+        if (url != null) {
+            o.println("Parsing URL: " + url);
+            query = mlt.like(url);
+        }
+        else if (fn != null) {
+            o.println("Parsing file: " + fn);
+            query = mlt.like(new File(fn));
+        }
+
+        o.println("q: " + query);
+        o.println();
+        IndexSearcher searcher = new IndexSearcher(indexName);
+
+        Hits hits = searcher.search(query);
+        int len = hits.length();
+        o.println("found: " + len + " documents matching");
+        o.println();
+        for (int i = 0; i < Math.min(25, len); i++) {
+            Document d = hits.doc(i);
+			String summary = d.get( "summary");
+            o.println("score  : " + hits.score(i));
+            o.println("url    : " + d.get("url"));
+            o.println("\ttitle  : " + d.get("title"));
+			if ( summary != null)
+				o.println("\tsummary: " + d.get("summary"));
+            o.println();
+        }
+    }
+
+    /**
+     * Find words for a more-like-this query former.
+     *
+     * @param docNum the id of the lucene document from which to find terms
+     */
+    private PriorityQueue retrieveTerms(int docNum) throws IOException {
+        Map termFreqMap = new HashMap();
+        for (int i = 0; i < fieldNames.length; i++) {
+            String fieldName = fieldNames[i];
+            TermFreqVector vector = ir.getTermFreqVector(docNum, fieldName);
+
+            // field does not store term vector info
+            if (vector == null) {
+            	Document d=ir.document(docNum);
+            	String text[]=d.getValues(fieldName);
+            	if(text!=null)
+            	{
+                for (int j = 0; j < text.length; j++) {
+                  addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName);
+                }
+            	}
+            }
+            else {
+				addTermFrequencies(termFreqMap, vector);
+            }
+
+        }
+
+        return createQueue(termFreqMap);
+    }
+
+	/**
+	 * Adds terms and frequencies found in vector into the Map termFreqMap
+	 * @param termFreqMap a Map of terms and their frequencies
+	 * @param vector List of terms and their frequencies for a doc/field
+	 */
+	private void addTermFrequencies(Map termFreqMap, TermFreqVector vector)
+	{
+		String[] terms = vector.getTerms();
+		int freqs[]=vector.getTermFrequencies();
+		for (int j = 0; j < terms.length; j++) {
+		    String term = terms[j];
+		
+			if(isNoiseWord(term)){
+				continue;
+			}
+		    // increment frequency
+		    Int cnt = (Int) termFreqMap.get(term);
+		    if (cnt == null) {
+		    	cnt=new Int();
+				termFreqMap.put(term, cnt);
+				cnt.x=freqs[j];				
+		    }
+		    else {
+		        cnt.x+=freqs[j];
+		    }
+		}
+	}
+	/**
+	 * Adds term frequencies found by tokenizing text from reader into the Map words
+	 * @param r a source of text to be tokenized
+	 * @param termFreqMap a Map of terms and their frequencies
+	 * @param fieldName Used by analyzer for any special per-field analysis
+	 */
+	private void addTermFrequencies(Reader r, Map termFreqMap, String fieldName)
+		throws IOException
+	{
+		   TokenStream ts = analyzer.tokenStream(fieldName, r);
+			org.apache.lucene.analysis.Token token;
+			int tokenCount=0;
+			while ((token = ts.next()) != null) { // for every token
+				String word = token.termText();
+				tokenCount++;
+				if(tokenCount>maxNumTokensParsed)
+				{
+					break;
+				}
+				if(isNoiseWord(word)){
+					continue;
+				}
+				
+				// increment frequency
+				Int cnt = (Int) termFreqMap.get(word);
+				if (cnt == null) {
+					termFreqMap.put(word, new Int());
+				}
+				else {
+					cnt.x++;
+				}
+			}
+	}
+	
+	
+	/** determines if the passed term is likely to be of interest in "more like" comparisons 
+	 * 
+	 * @param term The word being considered
+	 * @return true if should be ignored, false if should be used in further analysis
+	 */
+	private boolean isNoiseWord(String term)
+	{
+		int len = term.length();
+		if (minWordLen > 0 && len < minWordLen) {
+			return true;
+		}
+		if (maxWordLen > 0 && len > maxWordLen) {
+			return true;
+		}
+		if (stopWords != null && stopWords.contains( term)) {
+			return true;
+		}
+		return false;
+	}
+	
+
+    /**
+     * Find words for a more-like-this query former.
+	 * The result is a priority queue of arrays with one entry for <b>every word</b> in the document.
+	 * Each array has 6 elements.
+	 * The elements are:
+	 * <ol>
+	 * <li> The word (String)
+	 * <li> The top field that this word comes from (String)
+	 * <li> The score for this word (Float)
+	 * <li> The IDF value (Float)
+	 * <li> The frequency of this word in the index (Integer)
+	 * <li> The frequency of this word in the source document (Integer)	 	 
+	 * </ol>
+	 * This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
+	 * This method is exposed so that you can identify the "interesting words" in a document.
+	 * For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
+     *
+     * @param r the reader that has the content of the document
+	 * @return the most intresting words in the document ordered by score, with the highest scoring, or best entry, first
+	 *
+	 * @see #retrieveInterestingTerms
+     */
+    public PriorityQueue retrieveTerms(Reader r) throws IOException {
+        Map words = new HashMap();
+        for (int i = 0; i < fieldNames.length; i++) {
+            String fieldName = fieldNames[i];
+			addTermFrequencies(r, words, fieldName);
+        }
+        return createQueue(words);
+    }
+
+	/**
+	 * Convenience routine to make it easy to return the most interesting words in a document.
+	 * More advanced users will call {@link #retrieveTerms(java.io.Reader) retrieveTerms()} directly.
+	 * @param r the source document
+	 * @return the most interesting words in the document
+	 *
+	 * @see #retrieveTerms(java.io.Reader)
+	 * @see #setMaxQueryTerms
+	 */
+	public String[] retrieveInterestingTerms( Reader r) throws IOException {
+		ArrayList al = new ArrayList( maxQueryTerms);
+		PriorityQueue pq = retrieveTerms( r);
+		Object cur;
+		int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
+		// we just want to return the top words
+		while (((cur = pq.pop()) != null) && lim-- > 0) {
+            Object[] ar = (Object[]) cur;
+			al.add( ar[ 0]); // the 1st entry is the interesting word
+		}
+		String[] res = new String[ al.size()];
+		return (String[]) al.toArray( res);
+	}
+
+    /**
+     * PriorityQueue that orders words by score.
+     */
+    private static class FreqQ extends PriorityQueue {
+        FreqQ (int s) {
+            initialize(s);
+        }
+
+        protected boolean lessThan(Object a, Object b) {
+            Object[] aa = (Object[]) a;
+            Object[] bb = (Object[]) b;
+            Float fa = (Float) aa[2];
+            Float fb = (Float) bb[2];
+            return fa.floatValue() > fb.floatValue();
+        }
+    }
+
+    /**
+     * Use for frequencies and to avoid renewing Integers.
+     */
+    private static class Int {
+        int x;
+
+        Int() {
+            x = 1;
+        }
+    }
+    
+    
+}

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThisQuery.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThisQuery.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThisQuery.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThisQuery.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,123 @@
+/*
+ * Created on 25-Jan-2006
+ */
+package org.apache.lucene.search.similar;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.similar.MoreLikeThis;
+
/**
 * A simple wrapper for MoreLikeThis for use in scenarios where a Query object is required, e.g.
 * in custom QueryParser extensions. At query.rewrite() time the reader is used to construct the
 * actual MoreLikeThis object and obtain the real Query object.
 * @author maharwood
 */
public class MoreLikeThisQuery extends Query
{

    
    private String likeText;
    private String[] moreLikeFields;
    private Analyzer analyzer;
    // Fraction of the generated optional clauses that must match, enforced in rewrite().
    float percentTermsToMatch=0.3f;
    // Passed through to MoreLikeThis.setMinTermFreq in rewrite().
    int minTermFrequency=1;
    // Passed through to MoreLikeThis.setMaxQueryTerms in rewrite().
    int maxQueryTerms=5;
    
    
    /**
     * @param likeText the example text that matching documents should resemble
     * @param moreLikeFields the document fields in which to look for similar terms
     * @param analyzer the analyzer used to tokenize likeText
     */
    public MoreLikeThisQuery(String likeText, String[] moreLikeFields, Analyzer analyzer)
    {
        this.likeText=likeText;
        this.moreLikeFields=moreLikeFields;
        this.analyzer=analyzer;
    }
    
    /**
     * Builds a MoreLikeThis instance around the reader and returns the
     * BooleanQuery it derives from likeText, with a minimum-should-match
     * constraint derived from percentTermsToMatch.
     */
    public Query rewrite(IndexReader reader) throws IOException
    {
        MoreLikeThis mlt=new MoreLikeThis(reader);
        
        mlt.setFieldNames(moreLikeFields);
        mlt.setAnalyzer(analyzer);
        mlt.setMinTermFreq(minTermFrequency);
        mlt.setMaxQueryTerms(maxQueryTerms);
        // NOTE(review): likeText.getBytes() uses the platform default charset; consider an
        // explicit encoding (or a Reader-based API) to make behaviour platform independent.
        BooleanQuery bq= (BooleanQuery) mlt.like(new ByteArrayInputStream(likeText.getBytes()));        
        BooleanClause[] clauses = bq.getClauses();
        // require percentTermsToMatch (default 30%) of the generated optional clauses to match
        bq.setMinimumNumberShouldMatch((int)(clauses.length*percentTermsToMatch));
        return bq;
    }
    /* (non-Javadoc)
     * @see org.apache.lucene.search.Query#toString(java.lang.String)
     */
    public String toString(String field)
    {       
        return "like:"+likeText;
    }

	/** @return the fraction (0..1) of generated clauses required to match */
	public float getPercentTermsToMatch() {
		return percentTermsToMatch;
	}
	/** @param percentTermsToMatch fraction (0..1) of generated clauses required to match */
	public void setPercentTermsToMatch(float percentTermsToMatch) {
		this.percentTermsToMatch = percentTermsToMatch;
	}

	/** @return the analyzer used to tokenize the example text */
	public Analyzer getAnalyzer()
	{
		return analyzer;
	}

	/** @param analyzer the analyzer used to tokenize the example text */
	public void setAnalyzer(Analyzer analyzer)
	{
		this.analyzer = analyzer;
	}

	/** @return the example text matching documents should resemble */
	public String getLikeText()
	{
		return likeText;
	}

	/** @param likeText the example text matching documents should resemble */
	public void setLikeText(String likeText)
	{
		this.likeText = likeText;
	}

	/** @return the cap on the number of terms used in the generated query */
	public int getMaxQueryTerms()
	{
		return maxQueryTerms;
	}

	/** @param maxQueryTerms cap on the number of terms used in the generated query */
	public void setMaxQueryTerms(int maxQueryTerms)
	{
		this.maxQueryTerms = maxQueryTerms;
	}

	/** @return minimum source-text frequency for a term to be considered */
	public int getMinTermFrequency()
	{
		return minTermFrequency;
	}

	/** @param minTermFrequency minimum source-text frequency for a term to be considered */
	public void setMinTermFrequency(int minTermFrequency)
	{
		this.minTermFrequency = minTermFrequency;
	}

	/** @return the document fields searched for similar terms */
	public String[] getMoreLikeFields()
	{
		return moreLikeFields;
	}

	/** @param moreLikeFields the document fields searched for similar terms */
	public void setMoreLikeFields(String[] moreLikeFields)
	{
		this.moreLikeFields = moreLikeFields;
	}
}

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,118 @@
+/**
+ * Copyright 2004 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.search.similar;
+
+import java.io.*;
+import java.util.*;
+import java.net.*;
+
+import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.standard.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.*;
+
/**
 * Simple similarity measures: static helpers that generate "find documents
 * like this one" queries.
 *
 * @see MoreLikeThis
 */
public final class SimilarityQueries
{
	/**
	 * Utility class - no instances.
	 */
	private SimilarityQueries()
	{
	}
	
	/**
	 * Simple similarity query generator.
	 * Takes every unique word and forms a boolean query where all words are optional.
	 * After you get this you'll use it to query your {@link IndexSearcher} for similar docs.
	 * The only caveat is the first hit returned <b>should be</b> your source document - you'll
	 * need to then ignore that.
	 *
	 * <p>
	 *
	 * So, if you have a code fragment like this:
	 * <br>
	 * <code>
	 * Query q = formSimilarQuery( "I use Lucene to search fast. Fast searchers are good", new StandardAnalyzer(), "contents", null);
	 * </code>
	 *
	 * <p>
	 *
	 
	 * The query returned, in string form, will be <code>'(i use lucene to search fast searchers are good')</code>.
	 *
	 * <p>
	 * The philosophy behind this method is "two documents are similar if they share lots of words".
	 * Note that behind the scenes, Lucene's scoring algorithm will tend to give two documents a higher similarity score if they share more uncommon words.
	 *
	 * <P>
	 * This method is fail-safe in that if a long 'body' is passed in and
	 * {@link BooleanQuery#add BooleanQuery.add()} (used internally)
	 * throws
	 * {@link org.apache.lucene.search.BooleanQuery.TooManyClauses BooleanQuery.TooManyClauses}, the
	 * query as it is will be returned.
	 *
	 * @param body the body of the document you want to find similar documents to
	 * @param a the analyzer to use to parse the body
	 * @param field the field you want to search on, probably something like "contents" or "body"
	 * @param stop optional set of stop words to ignore
	 * @return a query with all unique words in 'body'
	 * @throws IOException this can't happen...
	 */
    public static Query formSimilarQuery( String body,
										  Analyzer a,
										  String field,
										  Set stop)
										  throws IOException
	{	
		TokenStream ts = a.tokenStream( field, new StringReader( body));
		org.apache.lucene.analysis.Token t;
		BooleanQuery tmp = new BooleanQuery();
		Set already = new HashSet(); // tracks words already added, to ignore duplicates
		while ( (t = ts.next()) != null)
		{
			String word = t.termText();
			// skip words in the caller's optional stop-word set
			if ( stop != null &&
				 stop.contains( word)) continue;
			// Set.add returns false if the word was seen before - skip duplicates
			if ( ! already.add( word)) continue;
			// add to query
			TermQuery tq = new TermQuery( new Term( field, word));
			try
			{
				tmp.add( tq, false, false); // required=false, prohibited=false i.e. an optional (SHOULD) clause
			}
			catch( BooleanQuery.TooManyClauses too)
			{
				// fail-safe, just return what we have, not the end of the world
				break;
			}
		}
		return tmp;
	}
}

Added: lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/package.html
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/package.html?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/package.html (added)
+++ lucene/java/trunk/contrib/queries/src/java/org/apache/lucene/search/similar/package.html Fri Feb 24 16:39:18 2006
@@ -0,0 +1,5 @@
+<html>
+<body>
+Document similarity query generators.
+</body>
+</html>
\ No newline at end of file

Added: lucene/java/trunk/contrib/xml-query-parser/build.xml
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/build.xml?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/build.xml (added)
+++ lucene/java/trunk/contrib/xml-query-parser/build.xml Fri Feb 24 16:39:18 2006
@@ -0,0 +1,28 @@
<?xml version="1.0"?>

<project name="xml-query-parser" default="buildParser">

  <description>
    XML query parser
  </description>

  <!-- Inherit the standard contrib targets (compile, jar, test, etc). -->
  <import file="../contrib-build.xml"/>

  <!-- Jar produced by the "queries" contrib module, which this parser depends on. -->
  <property name="queries.jar" location="../../build/contrib/queries/lucene-queries-${version}.jar"/>

  <path id="classpath">
   <pathelement path="${lucene.jar}"/>
   <pathelement path="${queries.jar}"/>
   <pathelement path="${project.classpath}"/>
  </path>


  <!-- Default target: build the "queries" dependency first, then this module. -->
  <target name="buildParser" depends="buildQueries,default" />

  <!-- Builds the "queries" contrib module so its jar is on our classpath. -->
  <target name="buildQueries" >
  	  <echo>XML Parser building dependency ${queries.jar}</echo>
      <ant antfile="../queries/build.xml" target="default" inheritall="false"/>
  </target>


</project>

Added: lucene/java/trunk/contrib/xml-query-parser/readme.htm
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/readme.htm?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/readme.htm (added)
+++ lucene/java/trunk/contrib/xml-query-parser/readme.htm Fri Feb 24 16:39:18 2006
@@ -0,0 +1,33 @@
+<html>
+	<body>
+		<h1>XML-based query syntax
+		</h1>
+		<p>
+			This module contains:
+			<ul>
+				<li>a modular Lucene Query Parser where queries are expressed as XML</li>
+				<li>JUnit test</li>
+				<li>Example XML queries</li>
+				<li>Test index (subset of Reuters 21578)</li>
+			</ul>
+		</p>
+		<p>
+			The original motivation for creating this package was outlined and discussed <a href="http://marc.theaimsgroup.com/?l=lucene-dev&m=113355526731460&w=2">here</a>.
+		</p>
+		<p>
+			Parser support includes:
+			<ul>
+				<li>"Span" queries</li>
+				<li>"Like this" queries</li>
+				<li>Boolean, Term, and UserInput (parsed with existing query parser)</li>
+				<li>BoostingQuery - a class that can downgrade scores for hits on
+				certain terms rather than the hard-line approach taken by BooleanClause.Occurs.MUST_NOT</li>
+				<li>FilteredQuery, RangeFilter, and "TermsFilter" for non-sequential terms</li>
+				<li>"FuzzyLikeThis" a new query which is a cross between "LikeThis" and "fuzzy" but with
+					better scoring of fuzzy terms than standard fuzzy queries</li>
+				<li>A modular design with expandable support for new query/filter types</li>
+			</ul>
+		</p>
+		<p>This code is dependent on the "queries" contrib module although the "CoreParser" can be compiled with just Lucene core if required</p>
+	</body>
+</html>
\ No newline at end of file

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CoreParser.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CoreParser.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CoreParser.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CoreParser.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,124 @@
+package org.apache.lucene.xmlparser;
+
+import java.io.InputStream;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.xmlparser.builders.BooleanQueryBuilder;
+import org.apache.lucene.xmlparser.builders.ConstantScoreQueryBuilder;
+import org.apache.lucene.xmlparser.builders.RangeFilterBuilder;
+import org.apache.lucene.xmlparser.builders.SpanFirstBuilder;
+import org.apache.lucene.xmlparser.builders.SpanNearBuilder;
+import org.apache.lucene.xmlparser.builders.SpanNotBuilder;
+import org.apache.lucene.xmlparser.builders.SpanOrBuilder;
+import org.apache.lucene.xmlparser.builders.SpanOrTermsBuilder;
+import org.apache.lucene.xmlparser.builders.SpanQueryBuilderFactory;
+import org.apache.lucene.xmlparser.builders.SpanTermBuilder;
+import org.apache.lucene.xmlparser.builders.TermQueryBuilder;
+import org.apache.lucene.xmlparser.builders.UserInputQueryBuilder;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
/**
 * Assembles a QueryBuilder which uses only core Lucene Query objects
 * @author Mark
 *
 */
public class CoreParser implements QueryBuilder
{
	
	protected Analyzer analyzer;
	protected QueryParser parser;
	protected QueryBuilderFactory queryFactory;
	protected FilterBuilderFactory filterFactory;

	/**
	 * Registers builders for all the query/filter XML element types supported by
	 * core Lucene. Additional builders can be registered afterwards via
	 * {@link #addQueryBuilder(String, QueryBuilder)} and
	 * {@link #addFilterBuilder(String, FilterBuilder)}.
	 * @param analyzer used to tokenize any text content embedded in queries
	 * @param parser retained in the protected {@code parser} field; note it is
	 *        not used by any of the builders registered here (see below)
	 */
	public CoreParser(Analyzer analyzer, QueryParser parser)
	{
		this.analyzer=analyzer;
		this.parser=parser;
		filterFactory = new FilterBuilderFactory();
		filterFactory.addBuilder("RangeFilter",new RangeFilterBuilder());
		
		
		queryFactory = new QueryBuilderFactory();
		queryFactory.addBuilder("TermQuery",new TermQueryBuilder());
		queryFactory.addBuilder("BooleanQuery",new BooleanQueryBuilder(queryFactory));
		// NOTE(review): the "UserQuery" builder creates its own QueryParser hard-wired
		// to the "contents" field rather than using the QueryParser passed to this
		// constructor - confirm whether the supplied parser should be used instead.
		queryFactory.addBuilder("UserQuery",new UserInputQueryBuilder(new QueryParser("contents", analyzer)));
		queryFactory.addBuilder("FilteredQuery",new FilteredQueryBuilder(filterFactory,queryFactory));
		queryFactory.addBuilder("ConstantScoreQuery",new ConstantScoreQueryBuilder(filterFactory));
		
		// Each span builder is registered with both the span-specific factory (so
		// span queries can nest inside each other) and the general query factory
		// (so they can appear at the top level of a query document).
		SpanQueryBuilderFactory sqof=new SpanQueryBuilderFactory();

		SpanNearBuilder snb=new SpanNearBuilder(sqof);
		sqof.addBuilder("SpanNear",snb);
		queryFactory.addBuilder("SpanNear",snb);

		SpanTermBuilder snt=new SpanTermBuilder();
		sqof.addBuilder("SpanTerm",snt);
		queryFactory.addBuilder("SpanTerm",snt);
		
		SpanOrBuilder sot=new SpanOrBuilder(sqof);
		sqof.addBuilder("SpanOr",sot);
		queryFactory.addBuilder("SpanOr",sot);

		SpanOrTermsBuilder sots=new SpanOrTermsBuilder(analyzer);
		sqof.addBuilder("SpanOrTerms",sots);
		queryFactory.addBuilder("SpanOrTerms",sots);		
		
		SpanFirstBuilder sft=new SpanFirstBuilder(sqof);
		sqof.addBuilder("SpanFirst",sft);
		queryFactory.addBuilder("SpanFirst",sft);
		
		SpanNotBuilder snot=new SpanNotBuilder(sqof);
		sqof.addBuilder("SpanNot",snot);
		queryFactory.addBuilder("SpanNot",snot);	
	}
	
	/**
	 * Parses the XML on the given stream and builds the corresponding Query.
	 * @param xmlStream stream containing an XML query document
	 * @return the Query built from the document's root element
	 * @throws ParserException if the XML cannot be parsed or an element has no
	 *         registered builder
	 */
	public Query parse(InputStream xmlStream) throws ParserException
	{
		return getQuery(parseXML(xmlStream).getDocumentElement());
	}
	
	/** Registers a builder to handle query elements with the given tag name. */
	public void addQueryBuilder(String nodeName,QueryBuilder builder)
	{
		queryFactory.addBuilder(nodeName,builder);
	}
	/** Registers a builder to handle filter elements with the given tag name. */
	public void addFilterBuilder(String nodeName,FilterBuilder builder)
	{
		filterFactory.addBuilder(nodeName,builder);
	}
	
	/**
	 * Parses the stream into a DOM Document, wrapping any failure in a
	 * ParserException.
	 * NOTE(review): the DocumentBuilder is created with default settings, so
	 * DOCTYPE declarations and external entities are processed - consider XXE
	 * hardening if query XML may come from untrusted sources.
	 */
	private static Document parseXML(InputStream pXmlFile) throws ParserException
	{
		DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
		DocumentBuilder db = null;
		try
		{
			db = dbf.newDocumentBuilder();
		}
		catch (Exception se)
		{
			throw new ParserException("XML Parser configuration error", se);
		}
		org.w3c.dom.Document doc = null;
		try
		{
			doc = db.parse(pXmlFile);
		}
		catch (Exception se)
		{
			throw new ParserException("Error parsing XML stream:" + se, se);
		}
		return doc;
	}
	

	/** Builds a Query from a single XML element by delegating to the registered builder. */
	public Query getQuery(Element e) throws ParserException
	{
		return queryFactory.getQuery(e);
	}
}

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CorePlusExtensionsParser.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CorePlusExtensionsParser.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CorePlusExtensionsParser.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/CorePlusExtensionsParser.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,25 @@
+package org.apache.lucene.xmlparser;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.xmlparser.builders.BoostingQueryBuilder;
+import org.apache.lucene.xmlparser.builders.FuzzyLikeThisQueryBuilder;
+import org.apache.lucene.xmlparser.builders.LikeThisQueryBuilder;
+import org.apache.lucene.xmlparser.builders.TermsFilterBuilder;
+
+public class CorePlusExtensionsParser extends CoreParser
+{
+
+	public CorePlusExtensionsParser(Analyzer analyzer, QueryParser parser)
+	{
+		super(analyzer, parser);
+		filterFactory.addBuilder("TermsFilter",new TermsFilterBuilder(analyzer));
+		String fields[]={"contents"};
+		queryFactory.addBuilder("LikeThisQuery",new LikeThisQueryBuilder(analyzer,fields));
+		queryFactory.addBuilder("BoostingQuery", new BoostingQueryBuilder(queryFactory));
+		queryFactory.addBuilder("FuzzyLikeThisQuery", new FuzzyLikeThisQueryBuilder(analyzer));
+		
+	}
+
+
+}

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/DOMUtils.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/DOMUtils.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/DOMUtils.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/DOMUtils.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,198 @@
+package org.apache.lucene.xmlparser;
+import java.io.Reader;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.xml.sax.InputSource;
+
/**
 * Static convenience methods for navigating and building W3C DOM trees, used
 * by the XML query parser builders.
 */
public class DOMUtils
{
	/**
	 * Convenience method where there is only one child Element of a given name.
	 * @return the first child element with the given tag name, or null if none
	 */
	public static Element getChildByTagName(Element e, String name)
	{
		for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling())
		{
			if( (kid.getNodeType()==Node.ELEMENT_NODE) && (name.equals(kid.getNodeName())) )
			{
				return (Element)kid;
			}
		}
		return null;
	}

	/**
	 * Returns an attribute value from this node, or first parent node with this attribute defined
	 * @param element
	 * @param attributeName
	 * @return A non-zero-length value if defined, otherwise null
	 */
	public static String getAttributeWithInheritance(Element element, String attributeName)
	{
		String result=element.getAttribute(attributeName);
		if( (result==null)|| ("".equals(result) ) )
		{
			Node n=element.getParentNode();
			// Fixed: the parent of the document element is the Document node (not an
			// Element), so an unchecked cast here used to throw ClassCastException
			// when the attribute was absent all the way up to the root.
			if((n==element)||(n==null)|| !(n instanceof Element))
			{
				return null;
			}
			Element parent=(Element) n;
			return getAttributeWithInheritance(parent,attributeName);
		}
		return result;		
	}



	/**
	 * Convenience method where there is only one child Element of a given name.
	 * @return the text content of the named child, or null if no such child exists
	 */
	public static String getChildTextByTagName(Element e, String tagName)
	{
		Element child=getChildByTagName(e,tagName);
		if(child!=null)
		{
			return getText(child);
		}
		return null;
	}

	/**
	 * Convenience method to append a new child element, with optional text content.
	 * @param text text content for the new child, or null for an empty element
	 * @return the newly created child element
	 */
	public static Element insertChild(Element parent, String tagName, String text)
	{
	  	Element child = parent.getOwnerDocument().createElement(tagName);
		parent.appendChild(child);
		if(text!=null)
		{
		  	child.appendChild(child.getOwnerDocument().createTextNode(text));
		}
		return child;
	}

	/** Returns the named attribute, or deflt if it is absent or empty. */
	public static String getAttribute(Element element, String attributeName, String deflt)
	{
		String result=element.getAttribute(attributeName);
		if( (result==null)|| ("".equals(result) ) )
		{
			return deflt;
		}
		return result;
	}

	/**
	 * Returns the named attribute parsed as a float, or deflt if absent or empty.
	 * @throws NumberFormatException if the attribute value is not a valid float
	 */
	public static float getAttribute(Element element, String attributeName, float deflt)
	{
		String result=element.getAttribute(attributeName);
		if( (result==null)|| ("".equals(result) ) )
		{
			return deflt;
		}
		return Float.parseFloat(result);
	}	

	/**
	 * Returns the named attribute parsed as an int, or deflt if absent or empty.
	 * @throws NumberFormatException if the attribute value is not a valid int
	 */
	public static int getAttribute(Element element, String attributeName, int deflt)
	{
		String result=element.getAttribute(attributeName);
		if( (result==null)|| ("".equals(result) ) )
		{
			return deflt;
		}
		return Integer.parseInt(result);
	}
	
	/**
	 * Returns the named attribute parsed as a boolean, or deflt if absent or empty.
	 * An attribute value of "true" (ignoring case) yields true; anything else false.
	 */
	public static boolean getAttribute(Element element, String attributeName,
			boolean deflt)
	{
		String result = element.getAttribute(attributeName);
		if ((result == null) || ("".equals(result)))
		{
			return deflt;
		}
		// Fixed: Boolean.getBoolean(result) reads a *system property* named by the
		// attribute value - it never parsed the attribute itself. Parse the string.
		return Boolean.valueOf(result).booleanValue();
	}	

	/* Returns text of node and all child nodes - without markup */
	//MH changed to Node from Element 25/11/2005
	public static String getText(Node e)
	{
		StringBuffer sb=new StringBuffer();
		getTextBuffer(e, sb);
		return sb.toString();
	}
	
	/** Returns the first child of element that is itself an Element, or null. */
	public static Element getFirstChildElement(Element element)
	{
		for (Node kid = element.getFirstChild(); kid != null; kid = kid
				.getNextSibling())
		{
			if (kid.getNodeType() == Node.ELEMENT_NODE) 
			{
				return (Element) kid;
			}
		}
		return null;
	}	

	/** Recursively appends the text content of e's subtree (text and entity refs) to sb. */
	private static void getTextBuffer(Node e, StringBuffer sb)
	{
	    for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling())
		{
			switch(kid.getNodeType())
			{
				case Node.TEXT_NODE:
				{
					sb.append(kid.getNodeValue());
					break;
				}
				case Node.ELEMENT_NODE:
				{
					getTextBuffer(kid, sb);
					break;
				}
				case Node.ENTITY_REFERENCE_NODE:
				{
					getTextBuffer(kid, sb);
					break;
				}
			}
		}
	}

	/**
	* Helper method to parse an XML stream into a DOM tree.
	* Failures (parser configuration or malformed XML) are rethrown as
	* unchecked RuntimeExceptions with the original exception as cause.
	* NOTE(review): the parser is created with default settings, so DOCTYPE
	* declarations/external entities are processed - consider XXE hardening
	* if the XML can come from untrusted sources.
	* @param is reader supplying the XML to be parsed
	* @return an org.w3c.dom.Document object
	*/
	public static Document loadXML(Reader is)
	{

		DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
		DocumentBuilder db = null;
		
		try
		{
			db = dbf.newDocumentBuilder();
		}
		catch (Exception se)
		{
			throw new RuntimeException("Parser configuration error", se);
		}

		// parse the input
		org.w3c.dom.Document doc = null;
		try
		{
			doc = db.parse(new InputSource(is));
		}
		catch (Exception se)
		{
			throw new RuntimeException("Error parsing file:" + se, se);
		}

		return doc;
	}
}
+
+
+

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilder.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilder.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilder.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilder.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,14 @@
+/*
+ * Created on 25-Jan-2006
+ */
+package org.apache.lucene.xmlparser;
+
+import org.apache.lucene.search.Filter;
+import org.w3c.dom.Element;
+
/**
 * Implemented by objects that know how to build a Lucene Filter from an XML
 * element. Implementations are registered with a FilterBuilderFactory under
 * the element (tag) name they handle.
 * @author maharwood
 */
public interface FilterBuilder {
	 /** Constructs a Filter from the given XML element. */
	 public Filter getFilter(Element e) throws ParserException;
}

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilderFactory.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilderFactory.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilderFactory.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilterBuilderFactory.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,31 @@
+/*
+ * Created on 25-Jan-2006
+ */
+package org.apache.lucene.xmlparser;
+
+import java.util.HashMap;
+
+import org.apache.lucene.search.Filter;
+import org.w3c.dom.Element;
+
+/**
+ * @author maharwood
+ */
+public class FilterBuilderFactory implements FilterBuilder {
+
+	HashMap builders=new HashMap();
+	
+	public Filter getFilter(Element n) throws ParserException {
+		FilterBuilder builder=(FilterBuilder) builders.get(n.getNodeName());
+		if(builder==null)
+		{
+			throw new ParserException("No FilterBuilder defined for node "+n.getNodeName()); 
+		}
+		return builder.getFilter(n); 
+	}
+	public void addBuilder(String nodeName,FilterBuilder builder)
+	{
+		builders.put(nodeName,builder);
+	}
+	
+}

Added: lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilteredQueryBuilder.java
URL: http://svn.apache.org/viewcvs/lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilteredQueryBuilder.java?rev=380874&view=auto
==============================================================================
--- lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilteredQueryBuilder.java (added)
+++ lucene/java/trunk/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/FilteredQueryBuilder.java Fri Feb 24 16:39:18 2006
@@ -0,0 +1,71 @@
+/*
+ * Created on 25-Jan-2006
+ */
+package org.apache.lucene.xmlparser;
+
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.Query;
+import org.w3c.dom.Element;
+
+
+/**
+ * @author maharwood
+ */
+public class FilteredQueryBuilder implements QueryBuilder {
+	
+	private FilterBuilder filterFactory;
+	private QueryBuilder queryFactory;
+
+	public FilteredQueryBuilder(FilterBuilder filterFactory, QueryBuilder queryFactory)
+	{
+		this.filterFactory=filterFactory;
+		this.queryFactory=queryFactory;
+		
+	}
+
+	/* (non-Javadoc)
+	 * @see org.apache.lucene.xmlparser.QueryObjectBuilder#process(org.w3c.dom.Element)
+	 */
+	public Query getQuery(Element e) throws ParserException {
+		Element filterElement=DOMUtils.getChildByTagName(e,"Filter");
+		if(filterElement==null)
+		{
+			throw new ParserException("FilteredQuery missing \"Filter\" child element");
+		}
+		filterElement=DOMUtils.getFirstChildElement(filterElement);
+		Filter f=null;
+		if(filterElement!=null)
+		{
+			f=filterFactory.getFilter(filterElement);
+		}
+		else
+		{
+			throw new ParserException("FilteredQuery \"Filter\" element missing child query element ");
+		}
+		
+		
+		Element queryElement=DOMUtils.getChildByTagName(e,"Query");
+		if(queryElement==null)
+		{
+			throw new ParserException("FilteredQuery missing \"Query\" child element");
+		}
+		queryElement=DOMUtils.getFirstChildElement(queryElement);
+		Query q=null;
+		if(queryElement!=null)
+		{
+			q=queryFactory.getQuery(queryElement);
+		}
+		else
+		{
+			throw new ParserException("FilteredQuery \"Query\" element missing child query element ");
+		}
+
+		
+		FilteredQuery fq = new FilteredQuery(q,f);
+		fq.setBoost(DOMUtils.getAttribute(e,"boost",1.0f));
+		return fq;
+
+	}
+
+}



Mime
View raw message